From 8c694602582aa79985e3233d6f7f960dd2a012dd Mon Sep 17 00:00:00 2001 From: Xiwei Pan Date: Mon, 30 Mar 2026 04:03:32 +0800 Subject: [PATCH 01/21] feat: add MinimumInternalMacroDataCompression model (#442) Implement the internal macro data compression problem (GJ SR23) with direct ILP reduction, CLI support, canonical example, and paper entry. Co-Authored-By: Claude Opus 4.6 (1M context) --- docs/paper/reductions.typ | 77 +++++ problemreductions-cli/src/cli.rs | 1 + problemreductions-cli/src/commands/create.rs | 74 ++++- ...minimum_internal_macro_data_compression.rs | 255 +++++++++++++++ src/models/misc/mod.rs | 4 + src/models/mod.rs | 10 +- ...minimuminternalmacrodatacompression_ilp.rs | 305 ++++++++++++++++++ src/rules/mod.rs | 3 + ...minimum_internal_macro_data_compression.rs | 174 ++++++++++ ...minimuminternalmacrodatacompression_ilp.rs | 120 +++++++ 10 files changed, 1008 insertions(+), 15 deletions(-) create mode 100644 src/models/misc/minimum_internal_macro_data_compression.rs create mode 100644 src/rules/minimuminternalmacrodatacompression_ilp.rs create mode 100644 src/unit_tests/models/misc/minimum_internal_macro_data_compression.rs create mode 100644 src/unit_tests/rules/minimuminternalmacrodatacompression_ilp.rs diff --git a/docs/paper/reductions.typ b/docs/paper/reductions.typ index 00bb1bf3..b95445c9 100644 --- a/docs/paper/reductions.typ +++ b/docs/paper/reductions.typ @@ -197,6 +197,7 @@ "SteinerTree": [Steiner Tree], "SteinerTreeInGraphs": [Steiner Tree in Graphs], "MinimumExternalMacroDataCompression": [Minimum External Macro Data Compression], + "MinimumInternalMacroDataCompression": [Minimum Internal Macro Data Compression], "StringToStringCorrection": [String-to-String Correction], "StrongConnectivityAugmentation": [Strong Connectivity Augmentation], "SubgraphIsomorphism": [Subgraph Isomorphism], @@ -5066,6 +5067,82 @@ A classical NP-complete problem from Garey and Johnson @garey1979[Ch.~3, p.~76], _Solution extraction._ Read $D$ from the $d_(j,c)$ indicators. Walk through the active segments (via $ell_i$ and $p_(i,lambda,delta)$) to reconstruct $C$. ] +#{ + let x = load-model-example("MinimumInternalMacroDataCompression") + let alpha-size = x.instance.alphabet_size + let s = x.instance.string + let n = s.len() + let h = x.instance.pointer_cost + let alpha-map = range(alpha-size).map(i => str.from-unicode(97 + i)) + let s-str = s.map(c => alpha-map.at(c)).join("") + let opt-val = metric-value(x.optimal_value) + [ + #problem-def("MinimumInternalMacroDataCompression")[ + Given a finite alphabet $Sigma$ of size $k$, a string $s in Sigma^*$ of length $n$, and a pointer cost $h in ZZ^+$, find a single compressed string $C in (Sigma union {"pointers"})^*$ such that $s$ can be obtained from $C$ by resolving all pointer references _within $C$ itself_ (left-to-right), minimizing the total cost $|C| + (h - 1) times$ (number of pointer occurrences in $C$). + ][ + A classical NP-hard data compression problem, listed as SR23 in Garey and Johnson @garey1979. Unlike the external variant (@def:MinimumExternalMacroDataCompression), there is no separate dictionary --- the compressed string $C$ serves as both dictionary and output, with pointers referencing substrings within $C$ itself. #cite(, form: "prose") proved NP-completeness via transformation from Vertex Cover. #cite(, form: "prose") showed that NP-completeness persists even when $h$ is any fixed integer $gt.eq 2$. The internal macro model is closely related to the smallest grammar problem, which is APX-hard @charikar2005.#footnote[No algorithm improving on brute-force enumeration is known for optimal internal macro compression.] + + *Example.* Let $Sigma = {#alpha-map.join(", ")}$ and $s = #s-str$ (length #n) with pointer cost $h = #h$. + + #pred-commands( + "pred create --example MinimumInternalMacroDataCompression -o min-imdc.json", + "pred solve min-imdc.json", + "pred evaluate min-imdc.json --config " + x.optimal_config.map(str).join(","), + ) + + #figure({ + let blue = graph-colors.at(0) + let green = graph-colors.at(1) + let cell(ch, highlight: false, ptr: false) = { + let fill = if ptr { green.transparentize(70%) } else if highlight { blue.transparentize(70%) } else { white } + box(width: 0.5cm, height: 0.55cm, fill: fill, stroke: 0.5pt + luma(120), + align(center + horizon, text(8pt, weight: "bold", ch))) + } + let ptr-cell(label) = { + box(width: 1.5cm, height: 0.55cm, fill: green.transparentize(70%), stroke: 0.5pt + luma(120), + align(center + horizon, text(7pt, weight: "bold", label))) + } + // Source string + // C = [a, b, c, ptr(0), ptr(0), EOS, ...] + align(center, stack(dir: ttb, spacing: 0.5cm, + // Source string + stack(dir: ltr, spacing: 0pt, + box(width: 1.5cm, height: 0.5cm, align(right + horizon, text(8pt)[$s: quad$])), + ..s.map(c => cell(alpha-map.at(c))), + ), + // Compressed string C + stack(dir: ltr, spacing: 0pt, + box(width: 1.5cm, height: 0.5cm, align(right + horizon, text(8pt)[$C: quad$])), + ..range(alpha-size).map(c => cell(alpha-map.at(c), highlight: true)), + ptr-cell[$arrow.r C[0..#alpha-size]$], + ptr-cell[$arrow.r C[0..#alpha-size]$], + ), + )) + }, + caption: [Minimum Internal Macro Data Compression: with $s = #s-str$ (length #n) and pointer cost $h = #h$, the optimal self-referencing compression $C$ starts with #alpha-size literals, then uses 2 pointers back to $C[0..#alpha-size]$, achieving cost $5 + (#h - 1) times 2 = #opt-val$ vs.~uncompressed cost #n.], + ) + + The compressed string $C$ has #alpha-size literal symbols followed by 2 pointers, each referencing $C[0..#alpha-size]$ to copy "#alpha-map.join("")". Each pointer costs $h = #h$ (the pointer token plus $h - 1 = #(h - 1)$ extra), so the total cost is $|C| + (h - 1) times |"pointers"| = 5 + #(h - 1) times 2 = #opt-val$, saving $#(n - int(opt-val))$ over the uncompressed cost of #n. + ] + ] +} + +#reduction-rule("MinimumInternalMacroDataCompression", "ILP")[ + The self-referencing compression problem is formulated as a binary ILP. Since there is no separate dictionary, only the string partition structure needs to be modeled. The partition is expressed as a flow on a DAG whose nodes are string positions and whose arcs are candidate segments. +][ + _Construction._ For alphabet $Sigma$ of size $k$, string $s$ of length $n$, and pointer cost $h$: + + _Variables:_ (1) Binary $ell_i in {0,1}$ for each string position $i in {0, dots, n-1}$: $ell_i = 1$ iff position $i$ is covered by a literal. (2) Binary $p_(i,lambda,r) in {0,1}$ for each valid triple $(i, lambda, r)$ where $r + lambda <= i$ and $s[r..r+lambda) = s[i..i+lambda)$: $p_(i,lambda,r) = 1$ iff positions $[i, i+lambda)$ are covered by a pointer referencing the decoded output starting at source position $r$. + + _Constraints:_ Partition flow: the segments form a partition of ${0, dots, n-1}$ via flow conservation on nodes $0, dots, n$. The string-matching constraint ($s[r..r+lambda) = s[i..i+lambda)$) and the precedence constraint ($r + lambda <= i$) are enforced structurally by only generating valid triples. + + _Objective:_ Minimize $sum_i ell_i + h sum_(i,lambda,r) p_(i,lambda,r)$. + + _Correctness._ ($arrow.r.double$) An optimal compressed string $C$ determines a feasible ILP assignment: activate the literal or pointer variable for each segment in the partition. The flow is satisfied by construction. ($arrow.l.double$) Any feasible ILP solution defines a valid partition of $s$ into literal and pointer segments with cost equal to the objective. + + _Solution extraction._ Walk through the active segments (via $ell_i$ and $p_(i,lambda,r)$) to reconstruct $C$, mapping source reference positions to compressed-string positions. +] + #{ let x = load-model-example("MinimumFeedbackArcSet") let nv = x.instance.graph.num_vertices diff --git a/problemreductions-cli/src/cli.rs b/problemreductions-cli/src/cli.rs index 8a134445..a20f5321 100644 --- a/problemreductions-cli/src/cli.rs +++ b/problemreductions-cli/src/cli.rs @@ -315,6 +315,7 @@ Flags by problem type: SequencingToMinimizeWeightedCompletionTime --lengths, --weights [--precedence-pairs] SequencingToMinimizeWeightedTardiness --sizes, --weights, --deadlines, --bound MinimumExternalMacroDataCompression --string, --pointer-cost [--alphabet-size] + MinimumInternalMacroDataCompression --string, --pointer-cost [--alphabet-size] SCS --strings [--alphabet-size] StringToStringCorrection --source-string, --target-string, --bound [--alphabet-size] D2CIF --arcs, --capacities, --source-1, --sink-1, --source-2, --sink-2, --requirement-1, --requirement-2 diff --git a/problemreductions-cli/src/commands/create.rs b/problemreductions-cli/src/commands/create.rs index f766e542..0276d363 100644 --- a/problemreductions-cli/src/commands/create.rs +++ b/problemreductions-cli/src/commands/create.rs @@ -25,14 +25,15 @@ use problemreductions::models::misc::{ ConjunctiveBooleanQuery, ConsistencyOfDatabaseFrequencyTables, EnsembleComputation, ExpectedRetrievalCost, FlowShopScheduling, FrequencyTable, GroupingBySwapping, IntExpr, IntegerExpressionMembership, JobShopScheduling, KnownValue, KthLargestMTuple, - LongestCommonSubsequence, MinimumExternalMacroDataCompression, MinimumTardinessSequencing, - MultiprocessorScheduling, PaintShop, PartiallyOrderedKnapsack, ProductionPlanning, QueryArg, + LongestCommonSubsequence, MinimumExternalMacroDataCompression, + MinimumInternalMacroDataCompression, MinimumTardinessSequencing, MultiprocessorScheduling, + PaintShop, PartiallyOrderedKnapsack, ProductionPlanning, QueryArg, RectilinearPictureCompression, RegisterSufficiency, ResourceConstrainedScheduling, - SchedulingToMinimizeWeightedCompletionTime, - SchedulingWithIndividualDeadlines, SequencingToMinimizeMaximumCumulativeCost, - SequencingToMinimizeWeightedCompletionTime, SequencingToMinimizeWeightedTardiness, - SequencingWithReleaseTimesAndDeadlines, SequencingWithinIntervals, ShortestCommonSupersequence, - StringToStringCorrection, SubsetSum, SumOfSquaresPartition, ThreePartition, TimetableDesign, + SchedulingToMinimizeWeightedCompletionTime, SchedulingWithIndividualDeadlines, + SequencingToMinimizeMaximumCumulativeCost, SequencingToMinimizeWeightedCompletionTime, + SequencingToMinimizeWeightedTardiness, SequencingWithReleaseTimesAndDeadlines, + SequencingWithinIntervals, ShortestCommonSupersequence, StringToStringCorrection, SubsetSum, + SumOfSquaresPartition, ThreePartition, TimetableDesign, }; use problemreductions::models::BiconnectivityAugmentation; use problemreductions::prelude::*; @@ -759,7 +760,7 @@ fn example_for(canonical: &str, graph_type: Option<&str>) -> &'static str { "--strings \"010110;100101;001011\" --alphabet-size 2" } "GroupingBySwapping" => "--string \"0,1,2,0,1,2\" --bound 5", - "MinimumExternalMacroDataCompression" => { + "MinimumExternalMacroDataCompression" | "MinimumInternalMacroDataCompression" => { "--string \"0,1,0,1\" --pointer-cost 2 --alphabet-size 2" } "MinimumCardinalityKey" => { @@ -897,8 +898,10 @@ fn help_flag_hint( "raw strings: \"ABAC;BACA\" or symbol lists: \"0,1,0;1,0,1\"" } ("GroupingBySwapping", "string") => "symbol list: \"0,1,2,0,1,2\"", - ("MinimumExternalMacroDataCompression", "string") => "symbol list: \"0,1,0,1\"", - ("MinimumExternalMacroDataCompression", "pointer_cost") => "positive integer: 2", + ("MinimumExternalMacroDataCompression", "string") + | ("MinimumInternalMacroDataCompression", "string") => "symbol list: \"0,1,0,1\"", + ("MinimumExternalMacroDataCompression", "pointer_cost") + | ("MinimumInternalMacroDataCompression", "pointer_cost") => "positive integer: 2", ("ShortestCommonSupersequence", "strings") => "symbol lists: \"0,1,2;1,2,0\"", ("MultipleChoiceBranching", "partition") => "semicolon-separated groups: \"0,1;2,3\"", ("IntegralFlowHomologousArcs", "homologous_pairs") => { @@ -3215,6 +3218,57 @@ pub fn create(args: &CreateArgs, out: &OutputConfig) -> Result<()> { ) } + // MinimumInternalMacroDataCompression + "MinimumInternalMacroDataCompression" => { + let usage = "Usage: pred create MinimumInternalMacroDataCompression --string \"0,1,0,1\" --pointer-cost 2 [--alphabet-size 2]"; + let string_str = args.string.as_deref().ok_or_else(|| { + anyhow::anyhow!("MinimumInternalMacroDataCompression requires --string\n\n{usage}") + })?; + let pointer_cost = args.pointer_cost.ok_or_else(|| { + anyhow::anyhow!( + "MinimumInternalMacroDataCompression requires --pointer-cost\n\n{usage}" + ) + })?; + anyhow::ensure!( + pointer_cost > 0, + "--pointer-cost must be a positive integer\n\n{usage}" + ); + + let string: Vec = if string_str.trim().is_empty() { + Vec::new() + } else { + string_str + .split(',') + .map(|value| { + value + .trim() + .parse::() + .context("invalid symbol index") + }) + .collect::>>()? + }; + let inferred = string.iter().copied().max().map_or(0, |value| value + 1); + let alphabet_size = args.alphabet_size.unwrap_or(inferred); + anyhow::ensure!( + alphabet_size >= inferred, + "--alphabet-size {} is smaller than max symbol + 1 ({}) in the input string", + alphabet_size, + inferred + ); + anyhow::ensure!( + alphabet_size > 0 || string.is_empty(), + "MinimumInternalMacroDataCompression requires a positive alphabet for non-empty strings.\n\n{usage}" + ); + ( + ser(MinimumInternalMacroDataCompression::new( + alphabet_size, + string, + pointer_cost, + ))?, + resolved_variant.clone(), + ) + } + // ClosestVectorProblem "ClosestVectorProblem" => { let basis_str = args.basis.as_deref().ok_or_else(|| { diff --git a/src/models/misc/minimum_internal_macro_data_compression.rs b/src/models/misc/minimum_internal_macro_data_compression.rs new file mode 100644 index 00000000..15f76309 --- /dev/null +++ b/src/models/misc/minimum_internal_macro_data_compression.rs @@ -0,0 +1,255 @@ +//! Minimum Internal Macro Data Compression problem implementation. +//! +//! Given an alphabet Σ, a string s ∈ Σ*, and a pointer cost h, +//! find a single compressed string C ∈ (Σ ∪ {pointers})* minimizing the cost +//! |C| + (h−1) × (number of pointer occurrences in C), +//! such that s can be obtained from C by resolving all pointer references +//! within C itself (left-to-right, greedy longest match). +//! +//! Unlike external macro compression, there is no separate dictionary — the +//! compressed string C serves as both dictionary and output. +//! +//! This problem is NP-hard (Storer, 1977; Storer & Szymanski, 1978). +//! Reference: Garey & Johnson A4 SR23. + +use crate::registry::{FieldInfo, ProblemSchemaEntry}; +use crate::traits::Problem; +use crate::types::Min; +use serde::{Deserialize, Serialize}; + +inventory::submit! { + ProblemSchemaEntry { + name: "MinimumInternalMacroDataCompression", + display_name: "Minimum Internal Macro Data Compression", + aliases: &[], + dimensions: &[], + module_path: module_path!(), + description: "Find minimum-cost self-referencing compression of a string with embedded pointers", + fields: &[ + FieldInfo { name: "alphabet_size", type_name: "usize", description: "Size of the alphabet (symbols indexed 0..alphabet_size)" }, + FieldInfo { name: "string", type_name: "Vec", description: "Source string as symbol indices" }, + FieldInfo { name: "pointer_cost", type_name: "usize", description: "Pointer cost h (each pointer adds h−1 extra to the cost)" }, + ], + } +} + +/// Minimum Internal Macro Data Compression problem. +/// +/// Given an alphabet of size `k`, a string `s` over `{0, ..., k-1}`, and +/// a pointer cost `h`, find a compressed string C that minimizes +/// cost = |C| + (h−1) × (pointer count in C), where C uses itself as both +/// dictionary and compressed output. +/// +/// # Representation +/// +/// The configuration is a vector of `string_len` entries. Each entry is: +/// - A symbol index in `{0, ..., alphabet_size-1}` (literal) +/// - `alphabet_size` (end-of-string marker; positions after this are padding) +/// - A value in `{alphabet_size+1, ..., alphabet_size + string_len}`, +/// encoding a pointer to C\[v − alphabet_size − 1\] with greedy longest match. +/// +/// During decoding, pointers are resolved left-to-right. A pointer at position +/// i referencing position j (where j < i in the decoded output) copies symbols +/// from the already-decoded output starting at j using greedy longest match. +/// +/// # Example +/// +/// ``` +/// use problemreductions::models::misc::MinimumInternalMacroDataCompression; +/// use problemreductions::{Problem, Solver, BruteForce}; +/// +/// // Alphabet {a, b}, string "abab", pointer cost h=2 +/// let problem = MinimumInternalMacroDataCompression::new(2, vec![0, 1, 0, 1], 2); +/// let solver = BruteForce::new(); +/// let solution = solver.find_witness(&problem); +/// assert!(solution.is_some()); +/// ``` +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct MinimumInternalMacroDataCompression { + alphabet_size: usize, + string: Vec, + pointer_cost: usize, +} + +impl MinimumInternalMacroDataCompression { + /// Create a new MinimumInternalMacroDataCompression instance. + /// + /// # Panics + /// + /// Panics if `alphabet_size` is 0 and the string is non-empty, or if + /// any symbol in the string is >= `alphabet_size`, or if `pointer_cost` is 0. + pub fn new(alphabet_size: usize, string: Vec, pointer_cost: usize) -> Self { + assert!( + alphabet_size > 0 || string.is_empty(), + "alphabet_size must be > 0 when the string is non-empty" + ); + assert!( + string + .iter() + .all(|&s| s < alphabet_size || alphabet_size == 0), + "all symbols must be less than alphabet_size" + ); + assert!(pointer_cost > 0, "pointer_cost must be positive"); + Self { + alphabet_size, + string, + pointer_cost, + } + } + + /// Returns the length of the source string. + pub fn string_len(&self) -> usize { + self.string.len() + } + + /// Returns the alphabet size. + pub fn alphabet_size(&self) -> usize { + self.alphabet_size + } + + /// Returns the pointer cost h. + pub fn pointer_cost(&self) -> usize { + self.pointer_cost + } + + /// Returns the source string. + pub fn string(&self) -> &[usize] { + &self.string + } + + /// Decode the compressed string C and return the decoded string, + /// the active length of C, and the pointer count. + /// Returns None if decoding fails (invalid pointer, circular reference, etc.). + fn decode(&self, config: &[usize]) -> Option<(Vec, usize, usize)> { + let n = self.string.len(); + let k = self.alphabet_size; + let eos = k; // end-of-string marker + + // Find active length: prefix before first end-of-string marker + let active_len = config.iter().position(|&v| v == eos).unwrap_or(n); + + // Verify contiguous: all after first EOS must be EOS or padding + for &v in &config[active_len..] { + if v != eos { + return None; + } + } + + // Decode left-to-right. A pointer at compressed position c_idx + // referencing C[j] copies from the decoded output that existed + // before this pointer (no overlapping/runaway copy). + let mut decoded = Vec::new(); + let mut pointer_count: usize = 0; + + for &v in &config[..active_len] { + if v < k { + // Literal symbol + decoded.push(v); + } else if v > k { + // Pointer: references C[ref_pos] in the compressed string + let ref_pos = v - k - 1; + if ref_pos >= decoded.len() { + return None; // pointer references undecoded position + } + // Greedy longest match from decoded[ref_pos..copy_start] + // (only pre-existing decoded content, no overlapping copy) + let copy_start = decoded.len(); + let mut matched = 0; + while copy_start + matched < n { + let src_idx = ref_pos + matched; + if src_idx >= copy_start { + break; // cannot read beyond pre-existing content + } + if decoded[src_idx] != self.string[copy_start + matched] { + break; + } + decoded.push(decoded[src_idx]); + matched += 1; + } + if matched == 0 { + return None; // pointer must copy at least one symbol + } + pointer_count += 1; + } else { + // v == eos, but we filtered those out above + return None; + } + } + + Some((decoded, active_len, pointer_count)) + } +} + +impl Problem for MinimumInternalMacroDataCompression { + const NAME: &'static str = "MinimumInternalMacroDataCompression"; + type Value = Min; + + fn variant() -> Vec<(&'static str, &'static str)> { + crate::variant_params![] + } + + fn dims(&self) -> Vec { + let n = self.string.len(); + let domain = self.alphabet_size + n + 1; // literals + EOS + pointers + vec![domain; n] + } + + fn evaluate(&self, config: &[usize]) -> Min { + let n = self.string.len(); + if config.len() != n { + return Min(None); + } + + // Handle empty string + if n == 0 { + return Min(Some(0)); + } + + match self.decode(config) { + Some((decoded, active_len, pointer_count)) => { + if decoded != self.string { + Min(None) + } else { + let cost = active_len + (self.pointer_cost - 1) * pointer_count; + Min(Some(cost)) + } + } + None => Min(None), + } + } +} + +crate::declare_variants! { + default MinimumInternalMacroDataCompression => "(alphabet_size + string_len + 1) ^ string_len", +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_model_example_specs() -> Vec { + // Issue #442 example: alphabet {a,b,c} (3), s="abcabcabc" (9), h=2 + // Optimal: C = [a, b, c, ptr(0), ptr(0), EOS, EOS, EOS, EOS] + // active_len = 5, pointers = 2 + // cost = 5 + (2-1)*2 = 7 + // + // Config encoding: + // alphabet_size = 3, string_len = 9, domain = 3+9+1 = 13 + // Literals: 0=a, 1=b, 2=c + // EOS: 3 + // Pointers: 4=ptr(C[0]), 5=ptr(C[1]), ... + let s: Vec = vec![0, 1, 2, 0, 1, 2, 0, 1, 2]; + let optimal_config = vec![ + 0, 1, 2, // literals a, b, c + 4, // ptr(C[0]) -> greedy "abc" + 4, // ptr(C[0]) -> greedy "abc" + 3, 3, 3, 3, // EOS padding + ]; + vec![crate::example_db::specs::ModelExampleSpec { + id: "minimum_internal_macro_data_compression", + instance: Box::new(MinimumInternalMacroDataCompression::new(3, s, 2)), + optimal_config, + optimal_value: serde_json::json!(7), + }] +} + +#[cfg(test)] +#[path = "../../unit_tests/models/misc/minimum_internal_macro_data_compression.rs"] +mod tests; diff --git a/src/models/misc/mod.rs b/src/models/misc/mod.rs index 1b8c1f05..ead9c9bc 100644 --- a/src/models/misc/mod.rs +++ b/src/models/misc/mod.rs @@ -17,6 +17,7 @@ //! - [`MultiprocessorScheduling`]: Schedule tasks on processors to meet a deadline //! - [`LongestCommonSubsequence`]: Longest Common Subsequence //! - [`MinimumExternalMacroDataCompression`]: Minimize compression cost using external dictionary +//! - [`MinimumInternalMacroDataCompression`]: Minimize self-referencing compression cost //! - [`MinimumTardinessSequencing`]: Minimize tardy tasks in single-machine scheduling //! - [`PaintShop`]: Minimize color switches in paint shop scheduling //! - [`CosineProductIntegration`]: Balanced sign assignment for integer frequencies @@ -84,6 +85,7 @@ mod knapsack; mod kth_largest_m_tuple; mod longest_common_subsequence; mod minimum_external_macro_data_compression; +mod minimum_internal_macro_data_compression; mod minimum_tardiness_sequencing; mod multiprocessor_scheduling; pub(crate) mod paintshop; @@ -131,6 +133,7 @@ pub use knapsack::Knapsack; pub use kth_largest_m_tuple::KthLargestMTuple; pub use longest_common_subsequence::LongestCommonSubsequence; pub use minimum_external_macro_data_compression::MinimumExternalMacroDataCompression; +pub use minimum_internal_macro_data_compression::MinimumInternalMacroDataCompression; pub use minimum_tardiness_sequencing::MinimumTardinessSequencing; pub use multiprocessor_scheduling::MultiprocessorScheduling; pub use paintshop::PaintShop; @@ -202,6 +205,7 @@ pub(crate) fn canonical_model_example_specs() -> Vec, + /// Total number of variables. + total_vars: usize, +} + +impl VarLayout { + fn new(n: usize, source_string: &[usize]) -> Self { + let lit_offset = 0; + let ptr_offset = lit_offset + n; + + // Enumerate all valid (i, l, r) triples where: + // - i is the start position in the source (0..n) + // - l is the segment length (1..n-i) + // - r is the reference position in the source (0..i), meaning + // the pointer copies from source[r..r+l] which must equal source[i..i+l] + // AND r < i (pointer references earlier decoded content) + let mut ptr_triples = Vec::new(); + for i in 0..n { + for l in 1..=(n - i) { + for r in 0..i { + // The pointer copies from decoded[r..r+l]. With non-overlapping + // semantics, decoded has exactly i characters before this pointer, + // so we need r + l <= i. + if r + l <= i + && r + l <= n + && source_string[r..r + l] == source_string[i..i + l] + { + ptr_triples.push((i, l, r)); + } + } + } + } + + let total_vars = ptr_offset + ptr_triples.len(); + Self { + n, + lit_offset, + ptr_offset, + ptr_triples, + total_vars, + } + } + + fn lit_var(&self, i: usize) -> usize { + self.lit_offset + i + } +} + +/// Result of reducing MinimumInternalMacroDataCompression to ILP. +#[derive(Debug, Clone)] +pub struct ReductionIMDCToILP { + target: ILP, + layout: VarLayout, + source_string: Vec, + alphabet_size: usize, +} + +impl ReductionResult for ReductionIMDCToILP { + type Source = MinimumInternalMacroDataCompression; + type Target = ILP; + + fn target_problem(&self) -> &ILP { + &self.target + } + + fn extract_solution(&self, target_solution: &[usize]) -> Vec { + let n = self.layout.n; + let k = self.alphabet_size; + let eos = k; // end-of-string marker + + // First pass: collect segments and build source-to-compressed-position map. + // source_to_c_pos[i] = compressed position that covers source position i. + let mut source_to_c_pos = vec![0usize; n]; + let mut segments: Vec<(usize, usize, Option)> = Vec::new(); // (source_start, len, ref_source_pos) + let mut c_pos = 0; + let mut pos = 0; + + while pos < n { + if target_solution[self.layout.lit_var(pos)] == 1 { + source_to_c_pos[pos] = c_pos; + segments.push((pos, 1, None)); + c_pos += 1; + pos += 1; + continue; + } + let mut found = false; + for (idx, &(i, l, r)) in self.layout.ptr_triples.iter().enumerate() { + if i == pos && target_solution[self.layout.ptr_offset + idx] == 1 { + for offset in 0..l { + source_to_c_pos[pos + offset] = c_pos; + } + segments.push((pos, l, Some(r))); + c_pos += 1; + pos += l; + found = true; + break; + } + } + if !found { + pos += 1; + } + } + + // Second pass: build config using source_to_c_pos for pointer references + let mut config = vec![eos; n]; + for (idx, &(src_start, _len, ref_pos)) in segments.iter().enumerate() { + match ref_pos { + None => { + config[idx] = self.source_string[src_start]; + } + Some(r) => { + // Pointer references source position r, which is at + // compressed position source_to_c_pos[r] + config[idx] = k + 1 + source_to_c_pos[r]; + } + } + } + + config + } +} + +#[reduction( + overhead = { + num_vars = "string_len + string_len ^ 3", + num_constraints = "string_len + 1 + string_len", + } +)] +impl ReduceTo> for MinimumInternalMacroDataCompression { + type Result = ReductionIMDCToILP; + + fn reduce_to(&self) -> Self::Result { + let n = self.string_len(); + let k = self.alphabet_size(); + let h = self.pointer_cost(); + let s = self.string(); + + // Handle empty string + if n == 0 { + let layout = VarLayout::new(0, s); + let target = ILP::new(0, vec![], vec![], ObjectiveSense::Minimize); + return ReductionIMDCToILP { + target, + layout, + source_string: vec![], + alphabet_size: k, + }; + } + + let layout = VarLayout::new(n, s); + let num_vars = layout.total_vars; + let mut constraints = Vec::new(); + + // Flow conservation on DAG: positions 0..n are nodes. + // A segment covers source positions [i, i+l). + // Segments: lit[i] covers [i, i+1), ptr[i][l][r] covers [i, i+l). + // + // Flow constraints: + // At node 0: sum of outgoing segments = 1 + // At node j (1..n-1): sum of incoming = sum of outgoing + // At node n: sum of incoming = 1 + + let segment_terms = |i: usize, l: usize| -> Vec<(usize, f64)> { + let mut terms = Vec::new(); + if l == 1 { + terms.push((layout.lit_var(i), 1.0)); + } + // All ptr variables for segment (i, l, *) + for (idx, &(pi, pl, _)) in layout.ptr_triples.iter().enumerate() { + if pi == i && pl == l { + terms.push((layout.ptr_offset + idx, 1.0)); + } + } + terms + }; + + for node in 0..=n { + let mut all_terms: Vec<(usize, f64)> = Vec::new(); + + if node == 0 { + for l in 1..=n { + all_terms.extend(segment_terms(0, l)); + } + constraints.push(LinearConstraint::eq(all_terms, 1.0)); + } else if node == n { + for j in 0..n { + let l = n - j; + all_terms.extend(segment_terms(j, l)); + } + constraints.push(LinearConstraint::eq(all_terms, 1.0)); + } else { + let mut incoming = Vec::new(); + for j in 0..node { + let l = node - j; + incoming.extend(segment_terms(j, l)); + } + let mut outgoing = Vec::new(); + for l in 1..=(n - node) { + outgoing.extend(segment_terms(node, l)); + } + for (var, coef) in incoming { + all_terms.push((var, coef)); + } + for (var, coef) in outgoing { + all_terms.push((var, -coef)); + } + constraints.push(LinearConstraint::eq(all_terms, 0.0)); + } + } + + // Pointer precedence: for ptr[i][l][r], we need r < i (already enforced + // by the triple enumeration). Additionally, the content at source[r..r+l] + // must equal source[i..i+l] (also enforced by triple enumeration). + // No additional constraints needed since we pre-filtered valid triples. + + // Objective: minimize literals + h * pointers + // = sum lit[i] + h * sum ptr[i][l][r] + // Since each literal contributes 1 to |C| and each pointer contributes + // 1 to |C| plus (h-1) to the pointer penalty: + // cost = |C| + (h-1)*pointers = (lits + ptrs) + (h-1)*ptrs = lits + h*ptrs + let mut objective: Vec<(usize, f64)> = Vec::new(); + for i in 0..n { + objective.push((layout.lit_var(i), 1.0)); + } + for (idx, _) in layout.ptr_triples.iter().enumerate() { + objective.push((layout.ptr_offset + idx, h as f64)); + } + + let target = ILP::new(num_vars, constraints, objective, ObjectiveSense::Minimize); + + ReductionIMDCToILP { + target, + layout, + source_string: s.to_vec(), + alphabet_size: k, + } + } +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_rule_example_specs() -> Vec { + use crate::export::SolutionPair; + + // s = "ab" (len 2), alphabet {a,b} (size 2), h=2 + // Optimal: uncompressed C="ab", cost = 2 + // ILP: lit[0]=1, lit[1]=1, no pointers + vec![crate::example_db::specs::RuleExampleSpec { + id: "minimuminternalmacrodatacompression_to_ilp", + build: || { + let source = MinimumInternalMacroDataCompression::new(2, vec![0, 1], 2); + let reduction = ReduceTo::>::reduce_to(&source); + let layout = &reduction.layout; + + let mut target_config = vec![0usize; layout.total_vars]; + target_config[layout.lit_var(0)] = 1; + target_config[layout.lit_var(1)] = 1; + + let source_config = reduction.extract_solution(&target_config); + + crate::example_db::specs::rule_example_with_witness::<_, ILP>( + source, + SolutionPair { + source_config, + target_config, + }, + ) + }, + }] +} + +#[cfg(test)] +#[path = "../unit_tests/rules/minimuminternalmacrodatacompression_ilp.rs"] +mod tests; diff --git a/src/rules/mod.rs b/src/rules/mod.rs index abb9e1fb..a9a4cc73 100644 --- a/src/rules/mod.rs +++ b/src/rules/mod.rs @@ -162,6 +162,8 @@ pub(crate) mod minimumfeedbackvertexset_ilp; #[cfg(feature = "ilp-solver")] pub(crate) mod minimumhittingset_ilp; #[cfg(feature = "ilp-solver")] +pub(crate) mod minimuminternalmacrodatacompression_ilp; +#[cfg(feature = "ilp-solver")] pub(crate) mod minimummultiwaycut_ilp; #[cfg(feature = "ilp-solver")] pub(crate) mod minimumsetcovering_ilp; @@ -349,6 +351,7 @@ pub(crate) fn canonical_rule_example_specs() -> Vec::NAME, + "MinimumInternalMacroDataCompression" + ); + assert_eq!( + ::variant(), + vec![] + ); + // dims: 9 slots, domain = 3 + 9 + 1 = 13 + let dims = problem.dims(); + assert_eq!(dims.len(), 9); + assert!(dims.iter().all(|&d| d == 13)); +} + +#[test] +fn test_minimum_internal_macro_data_compression_evaluate_uncompressed() { + // alphabet {a, b}, s = "ab", h = 2 + let problem = MinimumInternalMacroDataCompression::new(2, vec![0, 1], 2); + // Uncompressed: C = [a, b] = [0, 1] + // active_len = 2, pointers = 0 + // cost = 2 + 0 = 2 + assert_eq!(problem.evaluate(&[0, 1]), Min(Some(2))); +} + +#[test] +fn test_minimum_internal_macro_data_compression_evaluate_with_pointer() { + // alphabet {a, b}, s = "abab", h = 2 + let problem = MinimumInternalMacroDataCompression::new(2, vec![0, 1, 0, 1], 2); + // C = [a, b, ptr(0), EOS] = [0, 1, 3, 2] + // ptr(0) at position 2: refs decoded[0] = 'a', greedy match: 'a','b' = "ab" + // decoded = "abab" = s + // active_len = 3, pointers = 1 + // cost = 3 + (2-1)*1 = 4 + assert_eq!(problem.evaluate(&[0, 1, 3, 2]), Min(Some(4))); +} + +#[test] +fn test_minimum_internal_macro_data_compression_evaluate_invalid_decode() { + // alphabet {a, b}, s = "ab", h = 2 + let problem = MinimumInternalMacroDataCompression::new(2, vec![0, 1], 2); + // C = [b, a] decodes to "ba" != "ab" + assert_eq!(problem.evaluate(&[1, 0]), Min(None)); +} + +#[test] +fn test_minimum_internal_macro_data_compression_evaluate_wrong_length() { + let problem = MinimumInternalMacroDataCompression::new(2, vec![0, 1], 2); + assert_eq!(problem.evaluate(&[0]), Min(None)); + assert_eq!(problem.evaluate(&[0, 1, 0]), Min(None)); +} + +#[test] +fn test_minimum_internal_macro_data_compression_evaluate_interleaved_eos() { + // EOS then non-EOS is invalid + let problem = MinimumInternalMacroDataCompression::new(2, vec![0, 1], 2); + // config = [EOS, a] = [2, 0] + assert_eq!(problem.evaluate(&[2, 0]), Min(None)); +} + +#[test] +fn test_minimum_internal_macro_data_compression_evaluate_pointer_forward_ref() { + // alphabet {a, b}, s = "ab", h = 2 + let problem = MinimumInternalMacroDataCompression::new(2, vec![0, 1], 2); + // C = [ptr(0)] -> pointer at first position references decoded[0], but nothing decoded yet + // ptr(C[0]) encoded as 3 (alphabet_size + 1 + 0 = 2+1+0 = 3) + assert_eq!(problem.evaluate(&[3, 2]), Min(None)); +} + +#[test] +fn test_minimum_internal_macro_data_compression_empty_string() { + let problem = MinimumInternalMacroDataCompression::new(2, vec![], 2); + assert_eq!(problem.dims(), Vec::::new()); + assert_eq!(problem.evaluate(&[]), Min(Some(0))); +} + +#[test] +fn test_minimum_internal_macro_data_compression_brute_force_simple() { + // alphabet {a, b}, s = "ab", h = 2 + // Only valid compression is uncompressed [0, 1], cost = 2 + let problem = MinimumInternalMacroDataCompression::new(2, vec![0, 1], 2); + let solver = BruteForce::new(); + let witness = solver + .find_witness(&problem) + .expect("should find a solution"); + let val = problem.evaluate(&witness); + assert_eq!(val, Min(Some(2))); +} + +#[test] +fn test_minimum_internal_macro_data_compression_brute_force_repeated() { + // alphabet {a, b}, s = "abab", h = 2 + // domain = 2+4+1 = 7, 7^4 = 2401 configs (feasible) + let problem = MinimumInternalMacroDataCompression::new(2, vec![0, 1, 0, 1], 2); + let solver = BruteForce::new(); + let witness = solver + .find_witness(&problem) + .expect("should find a solution"); + let val = problem.evaluate(&witness); + assert!(val.0.is_some()); + // Optimal: C = [a, b, ptr(0), EOS] -> cost = 3 + 1 = 4 + // Or uncompressed: cost = 4 + 0 = 4 (same) + assert_eq!(val.0.unwrap(), 4); +} + +#[test] +fn test_minimum_internal_macro_data_compression_solve_aggregate() { + use crate::solvers::Solver; + let problem = MinimumInternalMacroDataCompression::new(2, vec![0, 1], 2); + let solver = BruteForce::new(); + let val = solver.solve(&problem); + assert_eq!(val, Min(Some(2))); +} + +#[test] +fn test_minimum_internal_macro_data_compression_serialization() { + let problem = MinimumInternalMacroDataCompression::new(3, vec![0, 1, 2], 2); + let json = serde_json::to_value(&problem).unwrap(); + let restored: MinimumInternalMacroDataCompression = serde_json::from_value(json).unwrap(); + assert_eq!(restored.alphabet_size(), problem.alphabet_size()); + assert_eq!(restored.string(), problem.string()); + assert_eq!(restored.pointer_cost(), problem.pointer_cost()); +} + +#[test] +fn test_minimum_internal_macro_data_compression_paper_example() { + // Issue example: alphabet {a,b,c} (3), s="abcabcabc" (9), h=2 + // Optimal: C = [a, b, c, ptr(0), ptr(0), EOS, EOS, EOS, EOS] + // active_len=5, pointers=2, cost = 5 + 1*2 = 7 + let problem = MinimumInternalMacroDataCompression::new(3, vec![0, 1, 2, 0, 1, 2, 0, 1, 2], 2); + let config = vec![0, 1, 2, 4, 4, 3, 3, 3, 3]; + // ptr(C[0]) = alphabet_size + 1 + 0 = 3 + 1 + 0 = 4 + let val = problem.evaluate(&config); + assert_eq!(val, Min(Some(7))); +} + +#[test] +fn test_minimum_internal_macro_data_compression_find_all_witnesses() { + // alphabet {a}, s = "a", h = 2 + // domain = 1+1+1 = 3, 3^1 = 3 configs + let problem = MinimumInternalMacroDataCompression::new(1, vec![0], 2); + let solver = BruteForce::new(); + let solutions = solver.find_all_witnesses(&problem); + // Only valid: [0] (literal 'a'), cost = 1 + assert_eq!(solutions.len(), 1); + assert_eq!(solutions[0], vec![0]); +} + +#[test] +fn test_minimum_internal_macro_data_compression_pointer_doubling() { + // alphabet {a}, s = "aaaa", h = 1 + // No overlapping copy: each ptr copies from pre-existing decoded content. + // C = [a, ptr(0), ptr(0), EOS] = [0, 2, 2, 1] + // - pos 0: literal 'a', decoded=[0] + // - pos 1: ptr(0), copy decoded[0..1]="a" (1 char), decoded=[0,0] + // - pos 2: ptr(0), copy decoded[0..2]="aa" (2 chars), decoded=[0,0,0,0] + // decoded = "aaaa" = s + // active_len = 3, pointers = 2, cost = 3 + 0*2 = 3 + let problem = MinimumInternalMacroDataCompression::new(1, vec![0, 0, 0, 0], 1); + let config = vec![0, 2, 2, 1]; // a, ptr(0), ptr(0), EOS + let val = problem.evaluate(&config); + assert_eq!(val, Min(Some(3))); +} diff --git a/src/unit_tests/rules/minimuminternalmacrodatacompression_ilp.rs b/src/unit_tests/rules/minimuminternalmacrodatacompression_ilp.rs new file mode 100644 index 00000000..39a64245 --- /dev/null +++ b/src/unit_tests/rules/minimuminternalmacrodatacompression_ilp.rs @@ -0,0 +1,120 @@ +use crate::models::algebraic::ILP; +use crate::models::misc::MinimumInternalMacroDataCompression; +use crate::rules::traits::{ReduceTo, ReductionResult}; +use crate::solvers::{BruteForce, Solver}; +use crate::traits::Problem; +use crate::types::Min; + +#[test] +fn test_imdc_to_ilp_closed_loop_simple() { + // s = "ab", alphabet {a,b}, h=2 + // Optimal: uncompressed, cost=2 + let source = MinimumInternalMacroDataCompression::new(2, vec![0, 1], 2); + let reduction = ReduceTo::>::reduce_to(&source); + let target = reduction.target_problem(); + + let solver = BruteForce::new(); + let target_witness = solver.find_witness(target).expect("ILP should be feasible"); + let source_config = reduction.extract_solution(&target_witness); + let val = source.evaluate(&source_config); + assert!(val.0.is_some()); + assert_eq!(val.0.unwrap(), 2); +} + +#[test] +fn test_imdc_to_ilp_closed_loop_repeated() { + // s = "abab", alphabet {a,b}, h=2 + // Optimal: cost=4 (uncompressed or pointer, both cost 4) + let source = MinimumInternalMacroDataCompression::new(2, vec![0, 1, 0, 1], 2); + let reduction = ReduceTo::>::reduce_to(&source); + let target = reduction.target_problem(); + + let solver = BruteForce::new(); + let target_witness = solver.find_witness(target).expect("ILP should be feasible"); + let source_config = reduction.extract_solution(&target_witness); + let val = source.evaluate(&source_config); + assert!(val.0.is_some()); + assert_eq!(val.0.unwrap(), 4); +} + +#[test] +fn test_imdc_to_ilp_closed_loop_low_pointer_cost() { + // s = "abab", alphabet {a,b}, h=1 + // With h=1, pointers cost 0 extra: cost = |C| + // Optimal with pointer: C=[a,b,ptr(0)], active=3, ptrs=1, cost=3+0=3 + let source = MinimumInternalMacroDataCompression::new(2, vec![0, 1, 0, 1], 1); + let reduction = ReduceTo::>::reduce_to(&source); + let target = reduction.target_problem(); + + let solver = BruteForce::new(); + let target_witness = solver.find_witness(target).expect("ILP should be feasible"); + let source_config = reduction.extract_solution(&target_witness); + let val = source.evaluate(&source_config); + assert!(val.0.is_some()); + // Verify against brute force + let bf_val = BruteForce::new().solve(&source); + assert_eq!(val, bf_val); +} + +#[test] +fn test_imdc_to_ilp_empty_string() { + let source = MinimumInternalMacroDataCompression::new(2, vec![], 2); + let reduction = ReduceTo::>::reduce_to(&source); + let target = reduction.target_problem(); + assert_eq!(target.num_variables(), 0); + let source_config = reduction.extract_solution(&[]); + assert_eq!(source.evaluate(&source_config), Min(Some(0))); +} + +#[test] +fn test_imdc_to_ilp_single_char() { + // s = "a", alphabet {a}, h=2 + // Only valid: literal, cost=1 + let source = MinimumInternalMacroDataCompression::new(1, vec![0], 2); + let reduction = ReduceTo::>::reduce_to(&source); + let target = reduction.target_problem(); + + let solver = BruteForce::new(); + let target_witness = solver.find_witness(target).expect("ILP should be feasible"); + let source_config = reduction.extract_solution(&target_witness); + assert_eq!(source.evaluate(&source_config), Min(Some(1))); +} + +#[test] +fn test_imdc_to_ilp_structure() { + // Verify the ILP has the right number of variables + let source = MinimumInternalMacroDataCompression::new(2, vec![0, 1, 0, 1], 2); + let reduction = ReduceTo::>::reduce_to(&source); + let target = reduction.target_problem(); + // n=4 literals + valid ptr triples + assert!(target.num_variables() >= 4); + // Must be a minimization problem + assert_eq!(target.dims(), vec![2; target.num_variables()]); +} + +#[test] +fn test_imdc_to_ilp_vs_brute_force() { + // Compare ILP result against brute-force for small instances + for (k, s, h) in [ + (1, vec![0, 0, 0], 2), + (2, vec![0, 1, 0], 2), + (2, vec![0, 0, 1, 1], 1), + ] { + let source = MinimumInternalMacroDataCompression::new(k, s.clone(), h); + let bf_val = BruteForce::new().solve(&source); + + let reduction = ReduceTo::>::reduce_to(&source); + let target = reduction.target_problem(); + let target_witness = BruteForce::new() + .find_witness(target) + .expect("ILP should be feasible"); + let source_config = reduction.extract_solution(&target_witness); + let ilp_val = source.evaluate(&source_config); + + assert_eq!( + ilp_val, bf_val, + "ILP and brute-force disagree for k={}, s={:?}, h={}", + k, s, h + ); + } +} From beda45621452ca002f832e63fc53be813f2aaa16 Mon Sep 17 00:00:00 2001 From: Xiwei Pan Date: Mon, 30 Mar 2026 04:26:31 +0800 Subject: [PATCH 02/21] feat: generalize MinimumTardinessSequencing with weight parameter (#495) Add W type parameter to MinimumTardinessSequencing: W=One for unit-length tasks (existing behavior), W=usize for arbitrary task lengths. Includes ILP reduction for both variants, canonical examples, and updated tests. Co-Authored-By: Claude Opus 4.6 (1M context) --- docs/paper/reductions.typ | 4 +- problemreductions-cli/src/commands/create.rs | 69 +++-- .../misc/minimum_tardiness_sequencing.rs | 250 ++++++++++++------ src/rules/minimumtardinesssequencing_ilp.rs | 174 ++++++++---- src/types.rs | 9 + .../misc/minimum_tardiness_sequencing.rs | 165 ++++++++---- .../rules/minimumtardinesssequencing_ilp.rs | 54 +++- src/unit_tests/trait_consistency.rs | 2 +- 8 files changed, 522 insertions(+), 205 deletions(-) diff --git a/docs/paper/reductions.typ b/docs/paper/reductions.typ index b95445c9..78eb412e 100644 --- a/docs/paper/reductions.typ +++ b/docs/paper/reductions.typ @@ -6114,8 +6114,8 @@ A classical NP-complete problem from Garey and Johnson @garey1979[Ch.~3, p.~76], ] } #{ - let x = load-model-example("MinimumTardinessSequencing") - let ntasks = x.instance.num_tasks + let x = load-model-example("MinimumTardinessSequencing", variant: (weight: "One")) + let ntasks = x.instance.lengths.len() let deadlines = x.instance.deadlines let precs = x.instance.precedences let sol = (config: x.optimal_config, metric: x.optimal_value) diff --git a/problemreductions-cli/src/commands/create.rs b/problemreductions-cli/src/commands/create.rs index 0276d363..20195f5d 100644 --- a/problemreductions-cli/src/commands/create.rs +++ b/problemreductions-cli/src/commands/create.rs @@ -42,6 +42,7 @@ use problemreductions::topology::{ BipartiteGraph, DirectedGraph, Graph, KingsSubgraph, MixedGraph, SimpleGraph, TriangularSubgraph, UnitDiskGraph, }; +use problemreductions::types::One; use serde::Serialize; use std::collections::{BTreeMap, BTreeSet}; @@ -3564,33 +3565,55 @@ pub fn create(args: &CreateArgs, out: &OutputConfig) -> Result<()> { "MinimumTardinessSequencing" => { let deadlines_str = args.deadlines.as_deref().ok_or_else(|| { anyhow::anyhow!( - "MinimumTardinessSequencing requires --deadlines and --n\n\n\ - Usage: pred create MinimumTardinessSequencing --n 5 --deadlines 5,5,5,3,3 [--precedence-pairs \"0>3,1>3,1>4,2>4\"]" - ) - })?; - let num_tasks = args.n.ok_or_else(|| { - anyhow::anyhow!( - "MinimumTardinessSequencing requires --n (number of tasks)\n\n\ - Usage: pred create MinimumTardinessSequencing --n 5 --deadlines 5,5,5,3,3" + "MinimumTardinessSequencing requires --deadlines\n\n\ + Usage: pred create MinimumTardinessSequencing --n 5 --deadlines 5,5,5,3,3 [--precedence-pairs \"0>3,1>3,1>4,2>4\"] [--sizes 3,2,2,1,2]" ) })?; let deadlines: Vec = util::parse_comma_list(deadlines_str)?; let precedences = parse_precedence_pairs(args.precedence_pairs.as_deref())?; - anyhow::ensure!( - deadlines.len() == num_tasks, - "deadlines length ({}) must equal num_tasks ({})", - deadlines.len(), - num_tasks - ); - validate_precedence_pairs(&precedences, num_tasks)?; - ( - ser(MinimumTardinessSequencing::new( - num_tasks, - deadlines, - precedences, - ))?, - resolved_variant.clone(), - ) + + if let Some(sizes_str) = args.sizes.as_deref() { + // Arbitrary-length variant (W = usize) + let lengths: Vec = util::parse_comma_list(sizes_str)?; + anyhow::ensure!( + lengths.len() == deadlines.len(), + "sizes length ({}) must equal deadlines length ({})", + lengths.len(), + deadlines.len() + ); + validate_precedence_pairs(&precedences, lengths.len())?; + ( + ser(MinimumTardinessSequencing::::with_lengths( + lengths, + deadlines, + precedences, + ))?, + resolved_variant.clone(), + ) + } else { + // Unit-length variant (W = One) + let num_tasks = args.n.ok_or_else(|| { + anyhow::anyhow!( + "MinimumTardinessSequencing requires --n (number of tasks) or --sizes\n\n\ + Usage: pred create MinimumTardinessSequencing --n 5 --deadlines 5,5,5,3,3" + ) + })?; + anyhow::ensure!( + deadlines.len() == num_tasks, + "deadlines length ({}) must equal num_tasks ({})", + deadlines.len(), + num_tasks + ); + validate_precedence_pairs(&precedences, num_tasks)?; + ( + ser(MinimumTardinessSequencing::::new( + num_tasks, + deadlines, + precedences, + ))?, + resolved_variant.clone(), + ) + } } // SchedulingWithIndividualDeadlines diff --git a/src/models/misc/minimum_tardiness_sequencing.rs b/src/models/misc/minimum_tardiness_sequencing.rs index eba25bb0..6b656f45 100644 --- a/src/models/misc/minimum_tardiness_sequencing.rs +++ b/src/models/misc/minimum_tardiness_sequencing.rs @@ -1,13 +1,16 @@ //! Minimum Tardiness Sequencing problem implementation. //! //! A classical NP-complete single-machine scheduling problem (SS2 from -//! Garey & Johnson, 1979) where unit-length tasks with precedence constraints +//! Garey & Johnson, 1979) where tasks with precedence constraints //! and deadlines must be scheduled to minimize the number of tardy tasks. -//! Corresponds to scheduling notation `1|prec, pj=1|sum Uj`. +//! +//! Variants: +//! - `MinimumTardinessSequencing` — unit-length tasks (`1|prec, pj=1|∑Uj`) +//! - `MinimumTardinessSequencing` — arbitrary-length tasks (`1|prec|∑Uj`) -use crate::registry::{FieldInfo, ProblemSchemaEntry}; +use crate::registry::{FieldInfo, ProblemSchemaEntry, VariantDimension}; use crate::traits::Problem; -use crate::types::Min; +use crate::types::{Min, One, WeightElement}; use serde::{Deserialize, Serialize}; inventory::submit! { @@ -15,12 +18,12 @@ inventory::submit! { name: "MinimumTardinessSequencing", display_name: "Minimum Tardiness Sequencing", aliases: &[], - dimensions: &[], + dimensions: &[VariantDimension::new("weight", "One", &["One", "usize"])], module_path: module_path!(), - description: "Schedule unit-length tasks with precedence constraints and deadlines to minimize the number of tardy tasks", + description: "Schedule tasks with precedence constraints and deadlines to minimize the number of tardy tasks", fields: &[ - FieldInfo { name: "num_tasks", type_name: "usize", description: "Number of tasks |T|" }, - FieldInfo { name: "deadlines", type_name: "Vec", description: "Deadline d(t) for each task (1-indexed finish time)" }, + FieldInfo { name: "lengths", type_name: "Vec", description: "Processing time l(t) for each task" }, + FieldInfo { name: "deadlines", type_name: "Vec", description: "Deadline d(t) for each task" }, FieldInfo { name: "precedences", type_name: "Vec<(usize, usize)>", description: "Precedence pairs (predecessor, successor)" }, ], } @@ -28,48 +31,41 @@ inventory::submit! { /// Minimum Tardiness Sequencing problem. /// -/// Given a set T of tasks, each with unit length and a deadline d(t), +/// Given a set T of tasks, each with a processing time l(t) and a deadline d(t), /// and a partial order (precedence constraints) on T, find a schedule -/// `sigma: T -> {0, 1, ..., |T|-1}` that is a valid permutation, -/// respects precedence constraints (`sigma(t) < sigma(t')` whenever `t < t'`), -/// and minimizes the number of tardy tasks (`|{t : sigma(t)+1 > d(t)}|`). +/// that is a valid permutation respecting precedence constraints +/// and minimizes the number of tardy tasks. /// -/// # Representation +/// # Type Parameters /// -/// Each task has a variable representing its position in the schedule. -/// A configuration is valid if and only if it is a bijective mapping -/// (permutation) that respects all precedence constraints. +/// * `W` - The weight/length type. `One` for unit-length tasks, `usize` for arbitrary. /// /// # Example /// /// ``` /// use problemreductions::models::misc::MinimumTardinessSequencing; +/// use problemreductions::types::One; /// use problemreductions::{Problem, Solver, BruteForce}; /// -/// let problem = MinimumTardinessSequencing::new( +/// // Unit-length: 3 tasks, task 0 must precede task 2 +/// let problem = MinimumTardinessSequencing::::new( /// 3, /// vec![2, 3, 1], -/// vec![(0, 2)], // task 0 must precede task 2 +/// vec![(0, 2)], /// ); /// let solver = BruteForce::new(); /// let solution = solver.find_witness(&problem); /// assert!(solution.is_some()); /// ``` #[derive(Debug, Clone, Serialize, Deserialize)] -pub struct MinimumTardinessSequencing { - num_tasks: usize, +pub struct MinimumTardinessSequencing { + lengths: Vec, deadlines: Vec, precedences: Vec<(usize, usize)>, } -impl MinimumTardinessSequencing { - /// Create a new MinimumTardinessSequencing instance. - /// - /// # Arguments - /// - /// * `num_tasks` - Number of tasks. - /// * `deadlines` - Deadline for each task (1-indexed: a task at position `p` finishes at time `p+1`). - /// * `precedences` - List of `(predecessor, successor)` pairs. +impl MinimumTardinessSequencing { + /// Create a new unit-length MinimumTardinessSequencing instance. /// /// # Panics /// @@ -81,30 +77,72 @@ impl MinimumTardinessSequencing { num_tasks, "deadlines length must equal num_tasks" ); - for &(pred, succ) in &precedences { - assert!( - pred < num_tasks, - "predecessor index {} out of range (num_tasks = {})", - pred, - num_tasks - ); - assert!( - succ < num_tasks, - "successor index {} out of range (num_tasks = {})", - succ, - num_tasks - ); + validate_precedences(num_tasks, &precedences); + Self { + lengths: vec![One; num_tasks], + deadlines, + precedences, } + } +} + +impl MinimumTardinessSequencing { + /// Create a new arbitrary-length MinimumTardinessSequencing instance. + /// + /// # Panics + /// + /// Panics if `lengths.len() != deadlines.len()`, if any length is 0, + /// or if any task index in `precedences` is out of range. + pub fn with_lengths( + lengths: Vec, + deadlines: Vec, + precedences: Vec<(usize, usize)>, + ) -> Self { + assert_eq!( + lengths.len(), + deadlines.len(), + "lengths and deadlines must have the same length" + ); + assert!( + lengths.iter().all(|&l| l > 0), + "all task lengths must be positive" + ); + let num_tasks = lengths.len(); + validate_precedences(num_tasks, &precedences); Self { - num_tasks, + lengths, deadlines, precedences, } } +} + +fn validate_precedences(num_tasks: usize, precedences: &[(usize, usize)]) { + for &(pred, succ) in precedences { + assert!( + pred < num_tasks, + "predecessor index {} out of range (num_tasks = {})", + pred, + num_tasks + ); + assert!( + succ < num_tasks, + "successor index {} out of range (num_tasks = {})", + succ, + num_tasks + ); + } +} +impl MinimumTardinessSequencing { /// Returns the number of tasks. pub fn num_tasks(&self) -> usize { - self.num_tasks + self.deadlines.len() + } + + /// Returns the task lengths. + pub fn lengths(&self) -> &[W] { + &self.lengths } /// Returns the deadlines. @@ -121,45 +159,87 @@ impl MinimumTardinessSequencing { pub fn num_precedences(&self) -> usize { self.precedences.len() } + + /// Decode and validate a schedule, returning the inverse permutation (sigma). + /// Returns None if the config is invalid or violates precedences. + fn decode_and_validate(&self, config: &[usize]) -> Option> { + let n = self.num_tasks(); + let schedule = super::decode_lehmer(config, n)?; + + let mut sigma = vec![0usize; n]; + for (pos, &task) in schedule.iter().enumerate() { + sigma[task] = pos; + } + + for &(pred, succ) in &self.precedences { + if sigma[pred] >= sigma[succ] { + return None; + } + } + + Some(sigma) + } } -impl Problem for MinimumTardinessSequencing { +impl Problem for MinimumTardinessSequencing { const NAME: &'static str = "MinimumTardinessSequencing"; type Value = Min; fn variant() -> Vec<(&'static str, &'static str)> { - crate::variant_params![] + crate::variant_params![One] } fn dims(&self) -> Vec { - super::lehmer_dims(self.num_tasks) + super::lehmer_dims(self.num_tasks()) } fn evaluate(&self, config: &[usize]) -> Min { - let n = self.num_tasks; - let Some(schedule) = super::decode_lehmer(config, n) else { + let n = self.num_tasks(); + let Some(sigma) = self.decode_and_validate(config) else { return Min(None); }; - // schedule[i] = the task scheduled at position i. - // We need sigma(task) = position, i.e., the inverse permutation. - let mut sigma = vec![0usize; n]; - for (pos, &task) in schedule.iter().enumerate() { - sigma[task] = pos; + // Unit length: completion time at position p is p + 1 + let tardy_count = (0..n).filter(|&t| sigma[t] + 1 > self.deadlines[t]).count(); + + Min(Some(tardy_count)) + } +} + +impl Problem for MinimumTardinessSequencing { + const NAME: &'static str = "MinimumTardinessSequencing"; + type Value = Min; + + fn variant() -> Vec<(&'static str, &'static str)> { + crate::variant_params![usize] + } + + fn dims(&self) -> Vec { + super::lehmer_dims(self.num_tasks()) + } + + fn evaluate(&self, config: &[usize]) -> Min { + let n = self.num_tasks(); + let Some(sigma) = self.decode_and_validate(config) else { + return Min(None); + }; + + // Build schedule order from sigma (inverse permutation) + let mut schedule = vec![0usize; n]; + for (task, &pos) in sigma.iter().enumerate() { + schedule[pos] = task; } - // Check precedence constraints: for each (pred, succ), sigma(pred) < sigma(succ) - for &(pred, succ) in &self.precedences { - if sigma[pred] >= sigma[succ] { - return Min(None); - } + // Compute completion times using actual lengths + let mut completion = vec![0usize; n]; + let mut cumulative = 0usize; + for &task in &schedule { + cumulative += self.lengths[task]; + completion[task] = cumulative; } - // Count tardy tasks: task t is tardy if sigma(t) + 1 > d(t) - let tardy_count = sigma - .iter() - .enumerate() - .filter(|&(t, &pos)| pos + 1 > self.deadlines[t]) + let tardy_count = (0..n) + .filter(|&t| completion[t] > self.deadlines[t]) .count(); Min(Some(tardy_count)) @@ -167,24 +247,40 @@ impl Problem for MinimumTardinessSequencing { } crate::declare_variants! { - default MinimumTardinessSequencing => "2^num_tasks", + default MinimumTardinessSequencing => "2^num_tasks", + MinimumTardinessSequencing => "2^num_tasks", } #[cfg(feature = "example-db")] pub(crate) fn canonical_model_example_specs() -> Vec { - vec![crate::example_db::specs::ModelExampleSpec { - id: "minimum_tardiness_sequencing", - // 4 tasks with precedence 0 -> 2 (task 0 before task 2). - // Deadlines: task 0 by time 2, task 1 by time 3, task 2 by time 1, task 3 by time 4. - instance: Box::new(MinimumTardinessSequencing::new( - 4, - vec![2, 3, 1, 4], - vec![(0, 2)], - )), - // Lehmer code [0,0,0,0] = identity permutation (schedule order 0,1,2,3) - optimal_config: vec![0, 0, 0, 0], - optimal_value: serde_json::json!(1), - }] + vec![ + // Unit-length variant + crate::example_db::specs::ModelExampleSpec { + id: "minimum_tardiness_sequencing", + instance: Box::new(MinimumTardinessSequencing::::new( + 4, + vec![2, 3, 1, 4], + vec![(0, 2)], + )), + optimal_config: vec![0, 0, 0, 0], + optimal_value: serde_json::json!(1), + }, + // Arbitrary-length variant + crate::example_db::specs::ModelExampleSpec { + id: "minimum_tardiness_sequencing_weighted", + // 5 tasks, lengths [3,2,2,1,2], deadlines [4,3,8,3,6], prec (0→2, 1→3) + // Optimal schedule: t0,t4,t2,t1,t3 → 2 tardy + // Lehmer [0,3,1,0,0]: avail=[0,1,2,3,4] pick 0→0; [1,2,3,4] pick 3→4; + // [1,2,3] pick 1→2; [1,3] pick 0→1; [3] pick 0→3 + instance: Box::new(MinimumTardinessSequencing::::with_lengths( + vec![3, 2, 2, 1, 2], + vec![4, 3, 8, 3, 6], + vec![(0, 2), (1, 3)], + )), + optimal_config: vec![0, 3, 1, 0, 0], + optimal_value: serde_json::json!(2), + }, + ] } #[cfg(test)] diff --git a/src/rules/minimumtardinesssequencing_ilp.rs b/src/rules/minimumtardinesssequencing_ilp.rs index 8d976e35..05e790e7 100644 --- a/src/rules/minimumtardinesssequencing_ilp.rs +++ b/src/rules/minimumtardinesssequencing_ilp.rs @@ -2,21 +2,16 @@ //! //! Position-assignment ILP: binary x_{j,p} placing task j in position p, //! with binary tardy indicator u_j. Precedence constraints and a -//! deadline-based tardy indicator with big-M = n. +//! length-aware tardy indicator with big-M linearization. use crate::models::algebraic::{LinearConstraint, ObjectiveSense, ILP}; use crate::models::misc::MinimumTardinessSequencing; use crate::reduction; use crate::rules::ilp_helpers::{one_hot_decode, permutation_to_lehmer}; use crate::rules::traits::{ReduceTo, ReductionResult}; +use crate::types::One; -/// Result of reducing MinimumTardinessSequencing to ILP. -/// -/// Variable layout: -/// - x_{j,p} for j in 0..n, p in 0..n: index `j*n + p` -/// - u_j for j in 0..n: index `n*n + j` -/// -/// Total: n^2 + n variables. +/// Result of reducing MinimumTardinessSequencing to ILP. #[derive(Debug, Clone)] pub struct ReductionMTSToILP { target: ILP, @@ -24,77 +19,151 @@ pub struct ReductionMTSToILP { } impl ReductionResult for ReductionMTSToILP { - type Source = MinimumTardinessSequencing; + type Source = MinimumTardinessSequencing; type Target = ILP; fn target_problem(&self) -> &ILP { &self.target } - /// Extract: decode position assignment x_{j,p} → permutation → Lehmer code. fn extract_solution(&self, target_solution: &[usize]) -> Vec { let n = self.num_tasks; - // Decode: for each position p, find which job j has x_{j,p}=1 let schedule = one_hot_decode(target_solution, n, n, 0); permutation_to_lehmer(&schedule) } } +/// Result of reducing MinimumTardinessSequencing to ILP. +#[derive(Debug, Clone)] +pub struct ReductionMTSWeightedToILP { + target: ILP, + num_tasks: usize, +} + +impl ReductionResult for ReductionMTSWeightedToILP { + type Source = MinimumTardinessSequencing; + type Target = ILP; + + fn target_problem(&self) -> &ILP { + &self.target + } + + fn extract_solution(&self, target_solution: &[usize]) -> Vec { + let n = self.num_tasks; + let schedule = one_hot_decode(target_solution, n, n, 0); + permutation_to_lehmer(&schedule) + } +} + +/// Build task assignment + position filling + precedence constraints (shared). +fn build_common_constraints( + n: usize, + precedences: &[(usize, usize)], + x_var: impl Fn(usize, usize) -> usize, +) -> Vec { + let mut constraints = Vec::new(); + + // 1. Each task assigned to exactly one position + for j in 0..n { + let terms: Vec<(usize, f64)> = (0..n).map(|p| (x_var(j, p), 1.0)).collect(); + constraints.push(LinearConstraint::eq(terms, 1.0)); + } + + // 2. Each position has exactly one task + for p in 0..n { + let terms: Vec<(usize, f64)> = (0..n).map(|j| (x_var(j, p), 1.0)).collect(); + constraints.push(LinearConstraint::eq(terms, 1.0)); + } + + // 3. Precedence constraints + for &(i, j) in precedences { + let mut terms: Vec<(usize, f64)> = Vec::new(); + for p in 0..n { + terms.push((x_var(j, p), p as f64)); + terms.push((x_var(i, p), -(p as f64))); + } + constraints.push(LinearConstraint::ge(terms, 1.0)); + } + + constraints +} + +// Unit-length variant #[reduction(overhead = { num_vars = "num_tasks * num_tasks + num_tasks", num_constraints = "2 * num_tasks + num_precedences + num_tasks", })] -impl ReduceTo> for MinimumTardinessSequencing { +impl ReduceTo> for MinimumTardinessSequencing { type Result = ReductionMTSToILP; fn reduce_to(&self) -> Self::Result { let n = self.num_tasks(); let num_x_vars = n * n; - let num_u_vars = n; - let num_vars = num_x_vars + num_u_vars; + let num_vars = num_x_vars + n; let big_m = n as f64; let x_var = |j: usize, p: usize| -> usize { j * n + p }; let u_var = |j: usize| -> usize { num_x_vars + j }; - let mut constraints = Vec::new(); + let mut constraints = build_common_constraints(n, self.precedences(), x_var); - // 1. Each task assigned to exactly one position: Σ_p x_{j,p} = 1 for all j + // Tardy indicator (unit length: completion = p+1) for j in 0..n { - let terms: Vec<(usize, f64)> = (0..n).map(|p| (x_var(j, p), 1.0)).collect(); - constraints.push(LinearConstraint::eq(terms, 1.0)); + let mut terms: Vec<(usize, f64)> = + (0..n).map(|p| (x_var(j, p), (p + 1) as f64)).collect(); + terms.push((u_var(j), -big_m)); + constraints.push(LinearConstraint::le(terms, self.deadlines()[j] as f64)); } - // 2. Each position has exactly one task: Σ_j x_{j,p} = 1 for all p - for p in 0..n { - let terms: Vec<(usize, f64)> = (0..n).map(|j| (x_var(j, p), 1.0)).collect(); - constraints.push(LinearConstraint::eq(terms, 1.0)); - } + let objective: Vec<(usize, f64)> = (0..n).map(|j| (u_var(j), 1.0)).collect(); - // 3. Precedence: Σ_p p*x_{i,p} + 1 <= Σ_p p*x_{j,p} for each (i,j) - // => Σ_p p*x_{j,p} - Σ_p p*x_{i,p} >= 1 - for &(i, j) in self.precedences() { - let mut terms: Vec<(usize, f64)> = Vec::new(); - for p in 0..n { - terms.push((x_var(j, p), p as f64)); - terms.push((x_var(i, p), -(p as f64))); - } - constraints.push(LinearConstraint::ge(terms, 1.0)); + ReductionMTSToILP { + target: ILP::new(num_vars, constraints, objective, ObjectiveSense::Minimize), + num_tasks: n, } + } +} + +// Arbitrary-length variant +#[reduction(overhead = { + num_vars = "num_tasks * num_tasks + num_tasks", + num_constraints = "2 * num_tasks + num_precedences + num_tasks * num_tasks", +})] +impl ReduceTo> for MinimumTardinessSequencing { + type Result = ReductionMTSWeightedToILP; - // 4. Tardy indicator: Σ_p (p+1)*x_{j,p} - d_j <= M*u_j for all j - // => Σ_p (p+1)*x_{j,p} - M*u_j <= d_j + fn reduce_to(&self) -> Self::Result { + let n = self.num_tasks(); + let num_x_vars = n * n; + let num_vars = num_x_vars + n; + let total_length: usize = self.lengths().iter().copied().sum(); + let big_m = total_length as f64; + + let x_var = |j: usize, p: usize| -> usize { j * n + p }; + let u_var = |j: usize| -> usize { num_x_vars + j }; + + let mut constraints = build_common_constraints(n, self.precedences(), x_var); + + // Tardy indicator for arbitrary lengths. + let lengths = self.lengths(); for j in 0..n { - let mut terms: Vec<(usize, f64)> = - (0..n).map(|p| (x_var(j, p), (p + 1) as f64)).collect(); - terms.push((u_var(j), -big_m)); - constraints.push(LinearConstraint::le(terms, self.deadlines()[j] as f64)); + for p in 0..n { + let mut terms: Vec<(usize, f64)> = Vec::new(); + terms.push((x_var(j, p), big_m)); + for pp in 0..p { + for (jj, &len) in lengths.iter().enumerate() { + terms.push((x_var(jj, pp), len as f64)); + } + } + terms.push((u_var(j), -big_m)); + let rhs = self.deadlines()[j] as f64 - lengths[j] as f64 + big_m; + constraints.push(LinearConstraint::le(terms, rhs)); + } } - // Objective: minimize Σ_j u_j let objective: Vec<(usize, f64)> = (0..n).map(|j| (u_var(j), 1.0)).collect(); - ReductionMTSToILP { + ReductionMTSWeightedToILP { target: ILP::new(num_vars, constraints, objective, ObjectiveSense::Minimize), num_tasks: n, } @@ -103,13 +172,26 @@ impl ReduceTo> for MinimumTardinessSequencing { #[cfg(feature = "example-db")] pub(crate) fn canonical_rule_example_specs() -> Vec { - vec![crate::example_db::specs::RuleExampleSpec { - id: "minimumtardinesssequencing_to_ilp", - build: || { - let source = MinimumTardinessSequencing::new(3, vec![2, 3, 1], vec![(0, 2)]); - crate::example_db::specs::rule_example_via_ilp::<_, bool>(source) + vec![ + crate::example_db::specs::RuleExampleSpec { + id: "minimumtardinesssequencing_to_ilp", + build: || { + let source = MinimumTardinessSequencing::::new(3, vec![2, 3, 1], vec![(0, 2)]); + crate::example_db::specs::rule_example_via_ilp::<_, bool>(source) + }, + }, + crate::example_db::specs::RuleExampleSpec { + id: "minimumtardinesssequencing_weighted_to_ilp", + build: || { + let source = MinimumTardinessSequencing::::with_lengths( + vec![2, 1, 3], + vec![3, 4, 5], + vec![(0, 2)], + ); + crate::example_db::specs::rule_example_via_ilp::<_, bool>(source) + }, }, - }] + ] } #[cfg(test)] diff --git a/src/types.rs b/src/types.rs index 4d14f6ca..4b698f30 100644 --- a/src/types.rs +++ b/src/types.rs @@ -59,6 +59,14 @@ impl WeightElement for f64 { } } +impl WeightElement for usize { + type Sum = usize; + const IS_UNIT: bool = false; + fn to_sum(&self) -> usize { + *self + } +} + /// The constant 1. Unit weight for unweighted problems. /// /// When used as the weight type parameter `W`, indicates that all weights @@ -553,6 +561,7 @@ use crate::impl_variant_param; impl_variant_param!(f64, "weight"); impl_variant_param!(i32, "weight", parent: f64, cast: |w| *w as f64); impl_variant_param!(One, "weight", parent: i32, cast: |_| 1i32); +impl_variant_param!(usize, "weight"); #[cfg(test)] #[path = "unit_tests/types.rs"] diff --git a/src/unit_tests/models/misc/minimum_tardiness_sequencing.rs b/src/unit_tests/models/misc/minimum_tardiness_sequencing.rs index fdcf29b6..4485cb34 100644 --- a/src/unit_tests/models/misc/minimum_tardiness_sequencing.rs +++ b/src/unit_tests/models/misc/minimum_tardiness_sequencing.rs @@ -1,10 +1,13 @@ use super::*; use crate::solvers::BruteForce; use crate::traits::Problem; +use crate::types::One; + +// ===== Unit-length variant (W = One) ===== #[test] fn test_minimum_tardiness_sequencing_basic() { - let problem = MinimumTardinessSequencing::new( + let problem = MinimumTardinessSequencing::::new( 5, vec![5, 5, 5, 3, 3], vec![(0, 3), (1, 3), (1, 4), (2, 4)], @@ -15,89 +18,65 @@ fn test_minimum_tardiness_sequencing_basic() { assert_eq!(problem.num_precedences(), 4); assert_eq!(problem.dims(), vec![5, 4, 3, 2, 1]); assert_eq!( - ::NAME, + as Problem>::NAME, "MinimumTardinessSequencing" ); - assert_eq!(::variant(), vec![]); } #[test] fn test_minimum_tardiness_sequencing_evaluate_optimal() { - // Example from issue: 5 tasks, optimal has 1 tardy task - let problem = MinimumTardinessSequencing::new( + let problem = MinimumTardinessSequencing::::new( 5, vec![5, 5, 5, 3, 3], vec![(0, 3), (1, 3), (1, 4), (2, 4)], ); - // Lehmer code [0,0,1,0,0] decodes to schedule [0,1,3,2,4]: - // available=[0,1,2,3,4] pick idx 0 -> 0; available=[1,2,3,4] pick idx 0 -> 1; - // available=[2,3,4] pick idx 1 -> 3; available=[2,4] pick idx 0 -> 2; available=[4] pick idx 0 -> 4. - // sigma: task 0 at pos 0, task 1 at pos 1, task 3 at pos 2, task 2 at pos 3, task 4 at pos 4. - // t0 finishes at 1 <= 5, t1 at 2 <= 5, t3 at 3 <= 3, t2 at 4 <= 5, t4 at 5 > 3 (tardy) let config = vec![0, 0, 1, 0, 0]; assert_eq!(problem.evaluate(&config), Min(Some(1))); } #[test] fn test_minimum_tardiness_sequencing_evaluate_invalid_lehmer() { - let problem = MinimumTardinessSequencing::new(3, vec![2, 3, 1], vec![]); - // dims = [3, 2, 1]; config [0, 2, 0] has 2 >= 2 (second dim), invalid Lehmer code + let problem = MinimumTardinessSequencing::::new(3, vec![2, 3, 1], vec![]); assert_eq!(problem.evaluate(&[0, 2, 0]), Min(None)); } #[test] fn test_minimum_tardiness_sequencing_evaluate_out_of_range() { - let problem = MinimumTardinessSequencing::new(3, vec![2, 3, 1], vec![]); - // dims = [3, 2, 1]; config [0, 1, 5] has 5 >= 1 (third dim), out of range + let problem = MinimumTardinessSequencing::::new(3, vec![2, 3, 1], vec![]); assert_eq!(problem.evaluate(&[0, 1, 5]), Min(None)); } #[test] fn test_minimum_tardiness_sequencing_evaluate_wrong_length() { - let problem = MinimumTardinessSequencing::new(3, vec![2, 3, 1], vec![]); + let problem = MinimumTardinessSequencing::::new(3, vec![2, 3, 1], vec![]); assert_eq!(problem.evaluate(&[0, 1]), Min(None)); assert_eq!(problem.evaluate(&[0, 1, 2, 3]), Min(None)); } #[test] fn test_minimum_tardiness_sequencing_evaluate_precedence_violation() { - let problem = MinimumTardinessSequencing::new( - 3, - vec![3, 3, 3], - vec![(0, 1)], // task 0 must precede task 1 - ); - // Lehmer [0,0,0] -> schedule [0,1,2] -> sigma [0,1,2]: sigma(0)=0 < sigma(1)=1, valid + let problem = MinimumTardinessSequencing::::new(3, vec![3, 3, 3], vec![(0, 1)]); assert_eq!(problem.evaluate(&[0, 0, 0]), Min(Some(0))); - // Lehmer [1,0,0] -> schedule [1,0,2] -> sigma [1,0,2]: sigma(0)=1 >= sigma(1)=0, violates assert_eq!(problem.evaluate(&[1, 0, 0]), Min(None)); - // Lehmer [2,1,0] -> schedule [2,1,0] -> sigma [2,1,0]: sigma(0)=2 >= sigma(1)=1, violates assert_eq!(problem.evaluate(&[2, 1, 0]), Min(None)); } #[test] fn test_minimum_tardiness_sequencing_evaluate_all_on_time() { - let problem = MinimumTardinessSequencing::new(3, vec![3, 3, 3], vec![]); - // All deadlines are 3, so any permutation of 3 tasks is on time - // Lehmer [0,0,0] -> schedule [0,1,2] + let problem = MinimumTardinessSequencing::::new(3, vec![3, 3, 3], vec![]); assert_eq!(problem.evaluate(&[0, 0, 0]), Min(Some(0))); - // Lehmer [2,1,0] -> schedule [2,1,0] assert_eq!(problem.evaluate(&[2, 1, 0]), Min(Some(0))); } #[test] fn test_minimum_tardiness_sequencing_evaluate_all_tardy() { - // Deadlines are all 0 (impossible to meet since earliest finish is 1) - // Wait: deadlines are usize and d(t)=0 means finish must be <= 0, but finish is at least 1 - // Actually, let's use deadlines that can't be met - let problem = MinimumTardinessSequencing::new(2, vec![0, 0], vec![]); - // Lehmer [0,0] -> schedule [0,1] -> sigma [0,1] - // pos 0 finishes at 1 > 0 (tardy), pos 1 finishes at 2 > 0 (tardy) + let problem = MinimumTardinessSequencing::::new(2, vec![0, 0], vec![]); assert_eq!(problem.evaluate(&[0, 0]), Min(Some(2))); } #[test] fn test_minimum_tardiness_sequencing_brute_force() { - let problem = MinimumTardinessSequencing::new( + let problem = MinimumTardinessSequencing::::new( 5, vec![5, 5, 5, 3, 3], vec![(0, 3), (1, 3), (1, 4), (2, 4)], @@ -107,29 +86,25 @@ fn test_minimum_tardiness_sequencing_brute_force() { .find_witness(&problem) .expect("should find a solution"); let metric = problem.evaluate(&solution); - // Optimal is 1 tardy task assert_eq!(metric, Min(Some(1))); } #[test] fn test_minimum_tardiness_sequencing_brute_force_no_precedences() { - // Without precedences, Moore's algorithm gives optimal - // 3 tasks: deadlines 1, 3, 2. Best is to schedule task with deadline 1 first. - let problem = MinimumTardinessSequencing::new(3, vec![1, 3, 2], vec![]); + let problem = MinimumTardinessSequencing::::new(3, vec![1, 3, 2], vec![]); let solver = BruteForce::new(); let solution = solver .find_witness(&problem) .expect("should find a solution"); let metric = problem.evaluate(&solution); - // All can be on time: t0 at pos 0 (finish 1 <= 1), t2 at pos 1 (finish 2 <= 2), t1 at pos 2 (finish 3 <= 3) assert_eq!(metric, Min(Some(0))); } #[test] fn test_minimum_tardiness_sequencing_serialization() { - let problem = MinimumTardinessSequencing::new(3, vec![2, 3, 1], vec![(0, 1)]); + let problem = MinimumTardinessSequencing::::new(3, vec![2, 3, 1], vec![(0, 1)]); let json = serde_json::to_value(&problem).unwrap(); - let restored: MinimumTardinessSequencing = serde_json::from_value(json).unwrap(); + let restored: MinimumTardinessSequencing = serde_json::from_value(json).unwrap(); assert_eq!(restored.num_tasks(), problem.num_tasks()); assert_eq!(restored.deadlines(), problem.deadlines()); assert_eq!(restored.precedences(), problem.precedences()); @@ -137,7 +112,7 @@ fn test_minimum_tardiness_sequencing_serialization() { #[test] fn test_minimum_tardiness_sequencing_empty() { - let problem = MinimumTardinessSequencing::new(0, vec![], vec![]); + let problem = MinimumTardinessSequencing::::new(0, vec![], vec![]); assert_eq!(problem.num_tasks(), 0); assert_eq!(problem.dims(), Vec::::new()); assert_eq!(problem.evaluate(&[]), Min(Some(0))); @@ -145,32 +120,122 @@ fn test_minimum_tardiness_sequencing_empty() { #[test] fn test_minimum_tardiness_sequencing_single_task() { - let problem = MinimumTardinessSequencing::new(1, vec![1], vec![]); + let problem = MinimumTardinessSequencing::::new(1, vec![1], vec![]); assert_eq!(problem.dims(), vec![1]); - // Task at position 0, finishes at 1 <= 1, not tardy assert_eq!(problem.evaluate(&[0]), Min(Some(0))); - let problem_tardy = MinimumTardinessSequencing::new(1, vec![0], vec![]); - // Task at position 0, finishes at 1 > 0, tardy + let problem_tardy = MinimumTardinessSequencing::::new(1, vec![0], vec![]); assert_eq!(problem_tardy.evaluate(&[0]), Min(Some(1))); } #[test] #[should_panic(expected = "deadlines length must equal num_tasks")] fn test_minimum_tardiness_sequencing_mismatched_deadlines() { - MinimumTardinessSequencing::new(3, vec![1, 2], vec![]); + MinimumTardinessSequencing::::new(3, vec![1, 2], vec![]); } #[test] #[should_panic(expected = "predecessor index 5 out of range")] fn test_minimum_tardiness_sequencing_invalid_precedence() { - MinimumTardinessSequencing::new(3, vec![1, 2, 3], vec![(5, 0)]); + MinimumTardinessSequencing::::new(3, vec![1, 2, 3], vec![(5, 0)]); } #[test] fn test_minimum_tardiness_sequencing_cyclic_precedences() { - // Cyclic precedences: 0 -> 1 -> 2 -> 0. No valid schedule exists. - let problem = MinimumTardinessSequencing::new(3, vec![3, 3, 3], vec![(0, 1), (1, 2), (2, 0)]); + let problem = + MinimumTardinessSequencing::::new(3, vec![3, 3, 3], vec![(0, 1), (1, 2), (2, 0)]); let solver = BruteForce::new(); assert!(solver.find_witness(&problem).is_none()); } + +// ===== Arbitrary-length variant (W = usize) ===== + +#[test] +fn test_minimum_tardiness_sequencing_weighted_basic() { + let problem = MinimumTardinessSequencing::::with_lengths( + vec![3, 2, 2, 1, 2], + vec![4, 3, 8, 3, 6], + vec![(0, 2), (1, 3)], + ); + assert_eq!(problem.num_tasks(), 5); + assert_eq!(problem.lengths(), &[3, 2, 2, 1, 2]); + assert_eq!(problem.deadlines(), &[4, 3, 8, 3, 6]); + assert_eq!(problem.num_precedences(), 2); +} + +#[test] +fn test_minimum_tardiness_sequencing_weighted_evaluate() { + // Issue example: 5 tasks, lengths [3,2,2,1,2], deadlines [4,3,8,3,6], prec (0→2, 1→3) + // Schedule: t0,t4,t2,t1,t3 + // Lehmer [0,3,1,0,0] -> schedule [0,4,2,1,3] + let problem = MinimumTardinessSequencing::::with_lengths( + vec![3, 2, 2, 1, 2], + vec![4, 3, 8, 3, 6], + vec![(0, 2), (1, 3)], + ); + // t0(l=3): finish=3, deadline=4 → on time + // t4(l=2): finish=5, deadline=6 → on time + // t2(l=2): finish=7, deadline=8 → on time + // t1(l=2): finish=9, deadline=3 → tardy + // t3(l=1): finish=10, deadline=3 → tardy + assert_eq!(problem.evaluate(&[0, 3, 1, 0, 0]), Min(Some(2))); +} + +#[test] +fn test_minimum_tardiness_sequencing_weighted_brute_force() { + let problem = MinimumTardinessSequencing::::with_lengths( + vec![3, 2, 2, 1, 2], + vec![4, 3, 8, 3, 6], + vec![(0, 2), (1, 3)], + ); + let solver = BruteForce::new(); + let solution = solver + .find_witness(&problem) + .expect("should find a solution"); + let metric = problem.evaluate(&solution); + assert_eq!(metric, Min(Some(2))); +} + +#[test] +fn test_minimum_tardiness_sequencing_weighted_serialization() { + let problem = MinimumTardinessSequencing::::with_lengths( + vec![3, 2, 2], + vec![4, 3, 8], + vec![(0, 1)], + ); + let json = serde_json::to_value(&problem).unwrap(); + let restored: MinimumTardinessSequencing = serde_json::from_value(json).unwrap(); + assert_eq!(restored.num_tasks(), problem.num_tasks()); + assert_eq!(restored.lengths(), problem.lengths()); + assert_eq!(restored.deadlines(), problem.deadlines()); +} + +#[test] +fn test_minimum_tardiness_sequencing_weighted_different_lengths() { + // 3 tasks: lengths [1,5,1], deadlines [2,6,3] + // Schedule [0,2,1]: t0(l=1,fin=1≤2✓), t2(l=1,fin=2≤3✓), t1(l=5,fin=7>6✗) → 1 tardy + // Schedule [0,1,2]: t0(l=1,fin=1≤2✓), t1(l=5,fin=6≤6✓), t2(l=1,fin=7>3✗) → 1 tardy + // Schedule [1,0,2]: t1(l=5,fin=5≤6✓), t0(l=1,fin=6>2✗), t2(l=1,fin=7>3✗) → 2 tardy + let problem = + MinimumTardinessSequencing::::with_lengths(vec![1, 5, 1], vec![2, 6, 3], vec![]); + let solver = BruteForce::new(); + let solution = solver + .find_witness(&problem) + .expect("should find a solution"); + assert_eq!(problem.evaluate(&solution), Min(Some(1))); +} + +#[test] +#[should_panic(expected = "all task lengths must be positive")] +fn test_minimum_tardiness_sequencing_weighted_zero_length() { + MinimumTardinessSequencing::::with_lengths(vec![1, 0, 2], vec![3, 3, 3], vec![]); +} + +#[test] +fn test_minimum_tardiness_sequencing_paper_example() { + // Issue example (unit-length): 4 tasks, deadlines [2,3,1,4], prec (0→2) + // Lehmer [0,0,0,0] = schedule [0,1,2,3] + // t0: finish=1≤2✓, t1: finish=2≤3✓, t2: finish=3>1✗, t3: finish=4≤4✓ → 1 tardy + let problem = MinimumTardinessSequencing::::new(4, vec![2, 3, 1, 4], vec![(0, 2)]); + assert_eq!(problem.evaluate(&[0, 0, 0, 0]), Min(Some(1))); +} diff --git a/src/unit_tests/rules/minimumtardinesssequencing_ilp.rs b/src/unit_tests/rules/minimumtardinesssequencing_ilp.rs index 2beb3ee2..21a0deae 100644 --- a/src/unit_tests/rules/minimumtardinesssequencing_ilp.rs +++ b/src/unit_tests/rules/minimumtardinesssequencing_ilp.rs @@ -3,10 +3,13 @@ use crate::models::algebraic::ILP; use crate::rules::test_helpers::assert_optimization_round_trip_from_optimization_target; use crate::solvers::{BruteForce, ILPSolver}; use crate::traits::Problem; +use crate::types::One; + +// ===== Unit-length variant ===== #[test] fn test_minimumtardinesssequencing_to_ilp_closed_loop() { - let problem = MinimumTardinessSequencing::new(3, vec![2, 3, 1], vec![(0, 2)]); + let problem = MinimumTardinessSequencing::::new(3, vec![2, 3, 1], vec![(0, 2)]); let reduction = ReduceTo::>::reduce_to(&problem); assert_optimization_round_trip_from_optimization_target( @@ -18,7 +21,7 @@ fn test_minimumtardinesssequencing_to_ilp_closed_loop() { #[test] fn test_minimumtardinesssequencing_to_ilp_bf_vs_ilp() { - let problem = MinimumTardinessSequencing::new(4, vec![2, 3, 1, 4], vec![(0, 2)]); + let problem = MinimumTardinessSequencing::::new(4, vec![2, 3, 1, 4], vec![(0, 2)]); let reduction = ReduceTo::>::reduce_to(&problem); let bf = BruteForce::new(); @@ -37,7 +40,7 @@ fn test_minimumtardinesssequencing_to_ilp_bf_vs_ilp() { #[test] fn test_minimumtardinesssequencing_to_ilp_no_precedences() { - let problem = MinimumTardinessSequencing::new(3, vec![1, 2, 3], vec![]); + let problem = MinimumTardinessSequencing::::new(3, vec![1, 2, 3], vec![]); let reduction = ReduceTo::>::reduce_to(&problem); let ilp_solution = ILPSolver::new() @@ -49,8 +52,7 @@ fn test_minimumtardinesssequencing_to_ilp_no_precedences() { #[test] fn test_minimumtardinesssequencing_to_ilp_all_tight() { - // All deadlines equal 1: only one task can be on time - let problem = MinimumTardinessSequencing::new(3, vec![1, 1, 1], vec![]); + let problem = MinimumTardinessSequencing::::new(3, vec![1, 1, 1], vec![]); let reduction = ReduceTo::>::reduce_to(&problem); let ilp_solution = ILPSolver::new() @@ -59,6 +61,46 @@ fn test_minimumtardinesssequencing_to_ilp_all_tight() { let extracted = reduction.extract_solution(&ilp_solution); let value = problem.evaluate(&extracted); assert!(value.is_valid()); - // At most 2 tardy tasks (only first task is on time if d=1) assert_eq!(value.0, Some(2)); } + +// ===== Arbitrary-length variant ===== + +#[test] +fn test_minimumtardinesssequencing_weighted_to_ilp_closed_loop() { + let problem = MinimumTardinessSequencing::::with_lengths( + vec![2, 1, 3], + vec![3, 4, 5], + vec![(0, 2)], + ); + let reduction = ReduceTo::>::reduce_to(&problem); + + assert_optimization_round_trip_from_optimization_target( + &problem, + &reduction, + "MinimumTardinessSequencing->ILP closed loop", + ); +} + +#[test] +fn test_minimumtardinesssequencing_weighted_to_ilp_vs_brute_force() { + let problem = MinimumTardinessSequencing::::with_lengths( + vec![3, 2, 2, 1, 2], + vec![4, 3, 8, 3, 6], + vec![(0, 2), (1, 3)], + ); + + let bf = BruteForce::new(); + let bf_witness = bf.find_witness(&problem).expect("should have solution"); + let bf_value = problem.evaluate(&bf_witness); + + let reduction = ReduceTo::>::reduce_to(&problem); + let ilp_solution = ILPSolver::new() + .solve(reduction.target_problem()) + .expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + let ilp_value = problem.evaluate(&extracted); + + assert_eq!(bf_value, ilp_value); + assert_eq!(ilp_value.0, Some(2)); +} diff --git a/src/unit_tests/trait_consistency.rs b/src/unit_tests/trait_consistency.rs index e7f89c40..d1be4fde 100644 --- a/src/unit_tests/trait_consistency.rs +++ b/src/unit_tests/trait_consistency.rs @@ -207,7 +207,7 @@ fn test_all_problems_implement_trait_correctly() { "SequencingToMinimizeWeightedTardiness", ); check_problem_trait( - &MinimumTardinessSequencing::new(3, vec![2, 3, 1], vec![(0, 2)]), + &MinimumTardinessSequencing::::new(3, vec![2, 3, 1], vec![(0, 2)]), "MinimumTardinessSequencing", ); check_problem_trait( From a37e58deceecbb37ffd6da7804a9a02236d57632 Mon Sep 17 00:00:00 2001 From: Xiwei Pan Date: Mon, 30 Mar 2026 04:42:00 +0800 Subject: [PATCH 03/21] feat: add SequencingToMinimizeTardyTaskWeight model (#496) Implement the weighted tardy task scheduling problem (GJ SS3) with direct ILP reduction, CLI support, canonical example, and paper entry. Co-Authored-By: Claude Opus 4.6 (1M context) --- docs/paper/reductions.typ | 61 ++++- problemreductions-cli/src/cli.rs | 1 + problemreductions-cli/src/commands/create.rs | 59 ++++- src/models/misc/mod.rs | 4 + ...equencing_to_minimize_tardy_task_weight.rs | 214 ++++++++++++++++++ src/models/mod.rs | 10 +- src/rules/mod.rs | 3 + ...sequencingtominimizetardytaskweight_ilp.rs | 116 ++++++++++ ...equencing_to_minimize_tardy_task_weight.rs | 198 ++++++++++++++++ ...sequencingtominimizetardytaskweight_ilp.rs | 84 +++++++ 10 files changed, 740 insertions(+), 10 deletions(-) create mode 100644 src/models/misc/sequencing_to_minimize_tardy_task_weight.rs create mode 100644 src/rules/sequencingtominimizetardytaskweight_ilp.rs create mode 100644 src/unit_tests/models/misc/sequencing_to_minimize_tardy_task_weight.rs create mode 100644 src/unit_tests/rules/sequencingtominimizetardytaskweight_ilp.rs diff --git a/docs/paper/reductions.typ b/docs/paper/reductions.typ index 78eb412e..8509a419 100644 --- a/docs/paper/reductions.typ +++ b/docs/paper/reductions.typ @@ -188,6 +188,7 @@ "SchedulingToMinimizeWeightedCompletionTime": [Scheduling to Minimize Weighted Completion Time], "SchedulingWithIndividualDeadlines": [Scheduling With Individual Deadlines], "SequencingToMinimizeMaximumCumulativeCost": [Sequencing to Minimize Maximum Cumulative Cost], + "SequencingToMinimizeTardyTaskWeight": [Sequencing to Minimize Tardy Task Weight], "SequencingToMinimizeWeightedCompletionTime": [Sequencing to Minimize Weighted Completion Time], "SequencingToMinimizeWeightedTardiness": [Sequencing to Minimize Weighted Tardiness], "SequencingWithReleaseTimesAndDeadlines": [Sequencing with Release Times and Deadlines], @@ -6431,7 +6432,47 @@ A classical NP-complete problem from Garey and Johnson @garey1979[Ch.~3, p.~76], ] } -#{ +#{ + let x = load-model-example("SequencingToMinimizeTardyTaskWeight") + let lengths = x.instance.lengths + let weights = x.instance.weights + let deadlines = x.instance.deadlines + let ntasks = lengths.len() + let schedule = x.optimal_config + let completions = { + let t = 0 + let result = () + for task in schedule { + t += lengths.at(task) + result.push(t) + } + result + } + let tardy-weights = schedule.enumerate().map(((pos, task)) => { + if completions.at(pos) > deadlines.at(task) { weights.at(task) } else { 0 } + }) + [ + #problem-def("SequencingToMinimizeTardyTaskWeight")[ + Given a set $T$ of $n$ tasks, a processing-time function $ell: T -> ZZ^+$, a weight function $w: T -> ZZ^+$, and a deadline function $d: T -> ZZ^+$, find a one-machine schedule that minimizes + $sum_(t in T) w(t) U(t),$ + where $U(t) = 1$ if the completion time $C(t) > d(t)$ (task $t$ is tardy) and $U(t) = 0$ otherwise. + ][ + Sequencing to Minimize Tardy Task Weight is problem SS8 in Garey & Johnson @garey1979, usually written $1 || sum w_j U_j$. The unweighted variant $1 || sum U_j$ (minimize number of tardy tasks) is solvable in $O(n log n)$ by Moore's algorithm @moore1968, but the weighted version is NP-complete. The problem is closely related to $1 || sum w_j T_j$ (Sequencing to Minimize Weighted Tardiness) but differs in using a 0/1 indicator $U_j$ instead of the actual tardiness $T_j = max(0, C_j - d_j)$. + + Configurations are direct permutation encodings: the config vector $(sigma_0, dots, sigma_{n-1})$ specifies which task occupies each position, i.e., $sigma_p$ is the index of the task scheduled at position $p$. A configuration is valid iff it is a permutation of $\{0, dots, n-1\}$. + + *Example.* Consider $n = #ntasks$ tasks with lengths $ell = (#lengths.map(v => str(v)).join(", "))$, weights $w = (#weights.map(v => str(v)).join(", "))$, and deadlines $d = (#deadlines.map(v => str(v)).join(", "))$. The optimal schedule $(#schedule.map(t => $t_(#(t + 1))$).join(", "))$ achieves completion times $(#completions.map(v => str(v)).join(", "))$. Tasks with completion time exceeding their deadline contribute weights $(#tardy-weights.map(v => str(v)).join(", "))$, giving total tardy weight $#x.optimal_value$. + + #pred-commands( + "pred create --example SequencingToMinimizeTardyTaskWeight -o sequencing-to-minimize-tardy-task-weight.json", + "pred solve sequencing-to-minimize-tardy-task-weight.json", + "pred evaluate sequencing-to-minimize-tardy-task-weight.json --config " + x.optimal_config.map(str).join(","), + ) + ] + ] +} + +#{ let x = load-model-example("IntegralFlowHomologousArcs") let arcs = x.instance.graph.arcs let sol = x.optimal_config @@ -9465,6 +9506,24 @@ The following reductions to Integer Linear Programming are straightforward formu _Solution extraction._ Decode the position assignment and convert the resulting permutation to Lehmer code. ] +#reduction-rule("SequencingToMinimizeTardyTaskWeight", "ILP")[ + Place each task in exactly one schedule position with a binary tardy indicator forced on whenever the completion time at that position exceeds the task's deadline. +][ + _Construction._ Variables: binary $x_(j,p)$ with $x_(j,p) = 1$ iff task $j$ occupies position $p$, and binary tardy indicator $u_j$. Let $M = sum_j ell_j$. The ILP is: + $ + "minimize" quad & sum_j w_j u_j \ + "subject to" quad & sum_p x_(j,p) = 1 quad forall j \ + & sum_j x_(j,p) = 1 quad forall p \ + & M x_(j,p) + sum_(p' < p) sum_(j') ell_(j') x_(j',p') - M u_j <= d_j - ell_j + M quad forall j, p \ + & x_(j,p) in {0, 1}, u_j in {0, 1} + $. + The third family of constraints enforces: if task $j$ is at position $p$ (so $x_(j,p) = 1$), then its completion time $ell_j + sum_(p' < p) sum_(j') ell_(j') x_(j',p')$ exceeds $d_j$ only when $u_j = 1$. + + _Correctness._ ($arrow.r.double$) Any schedule induces completion times; for each tardy task the big-$M$ constraint forces $u_j = 1$, so the objective counts exactly the total tardy weight. ($arrow.l.double$) Any feasible ILP assignment is a valid permutation (by the assignment constraints) and the tardy indicators agree with the actual completion times. + + _Solution extraction._ Read the unique position $p$ with $x_(j,p) = 1$ for each task $j$ to recover the schedule permutation. +] + #reduction-rule("SequencingToMinimizeWeightedTardiness", "ILP")[ Encode the single-machine order with pairwise precedence bits and completion times, then linearize the weighted tardiness bound with nonnegative tardiness variables. ][ diff --git a/problemreductions-cli/src/cli.rs b/problemreductions-cli/src/cli.rs index a20f5321..226c56d0 100644 --- a/problemreductions-cli/src/cli.rs +++ b/problemreductions-cli/src/cli.rs @@ -312,6 +312,7 @@ Flags by problem type: RectilinearPictureCompression --matrix (0/1), --k SchedulingWithIndividualDeadlines --n, --num-processors/--m, --deadlines [--precedence-pairs] SequencingToMinimizeMaximumCumulativeCost --costs [--precedence-pairs] + SequencingToMinimizeTardyTaskWeight --sizes, --weights, --deadlines SequencingToMinimizeWeightedCompletionTime --lengths, --weights [--precedence-pairs] SequencingToMinimizeWeightedTardiness --sizes, --weights, --deadlines, --bound MinimumExternalMacroDataCompression --string, --pointer-cost [--alphabet-size] diff --git a/problemreductions-cli/src/commands/create.rs b/problemreductions-cli/src/commands/create.rs index 20195f5d..b73fc011 100644 --- a/problemreductions-cli/src/commands/create.rs +++ b/problemreductions-cli/src/commands/create.rs @@ -30,10 +30,10 @@ use problemreductions::models::misc::{ PaintShop, PartiallyOrderedKnapsack, ProductionPlanning, QueryArg, RectilinearPictureCompression, RegisterSufficiency, ResourceConstrainedScheduling, SchedulingToMinimizeWeightedCompletionTime, SchedulingWithIndividualDeadlines, - SequencingToMinimizeMaximumCumulativeCost, SequencingToMinimizeWeightedCompletionTime, - SequencingToMinimizeWeightedTardiness, SequencingWithReleaseTimesAndDeadlines, - SequencingWithinIntervals, ShortestCommonSupersequence, StringToStringCorrection, SubsetSum, - SumOfSquaresPartition, ThreePartition, TimetableDesign, + SequencingToMinimizeMaximumCumulativeCost, SequencingToMinimizeTardyTaskWeight, + SequencingToMinimizeWeightedCompletionTime, SequencingToMinimizeWeightedTardiness, + SequencingWithReleaseTimesAndDeadlines, SequencingWithinIntervals, ShortestCommonSupersequence, + StringToStringCorrection, SubsetSum, SumOfSquaresPartition, ThreePartition, TimetableDesign, }; use problemreductions::models::BiconnectivityAugmentation; use problemreductions::prelude::*; @@ -3685,6 +3685,57 @@ pub fn create(args: &CreateArgs, out: &OutputConfig) -> Result<()> { ) } + // SequencingToMinimizeTardyTaskWeight + "SequencingToMinimizeTardyTaskWeight" => { + let sizes_str = args.sizes.as_deref().ok_or_else(|| { + anyhow::anyhow!( + "SequencingToMinimizeTardyTaskWeight requires --sizes, --weights, and --deadlines\n\n\ + Usage: pred create SequencingToMinimizeTardyTaskWeight --sizes 3,2,4,1,2 --weights 5,3,7,2,4 --deadlines 6,4,10,2,8" + ) + })?; + let weights_str = args.weights.as_deref().ok_or_else(|| { + anyhow::anyhow!( + "SequencingToMinimizeTardyTaskWeight requires --weights\n\n\ + Usage: pred create SequencingToMinimizeTardyTaskWeight --sizes 3,2,4,1,2 --weights 5,3,7,2,4 --deadlines 6,4,10,2,8" + ) + })?; + let deadlines_str = args.deadlines.as_deref().ok_or_else(|| { + anyhow::anyhow!( + "SequencingToMinimizeTardyTaskWeight requires --deadlines\n\n\ + Usage: pred create SequencingToMinimizeTardyTaskWeight --sizes 3,2,4,1,2 --weights 5,3,7,2,4 --deadlines 6,4,10,2,8" + ) + })?; + let lengths: Vec = util::parse_comma_list(sizes_str)?; + let weights: Vec = util::parse_comma_list(weights_str)?; + let deadlines: Vec = util::parse_comma_list(deadlines_str)?; + anyhow::ensure!( + lengths.len() == weights.len(), + "sizes length ({}) must equal weights length ({})", + lengths.len(), + weights.len() + ); + anyhow::ensure!( + lengths.len() == deadlines.len(), + "sizes length ({}) must equal deadlines length ({})", + lengths.len(), + deadlines.len() + ); + anyhow::ensure!( + lengths.iter().all(|&l| l > 0), + "task lengths must be positive" + ); + anyhow::ensure!( + weights.iter().all(|&w| w > 0), + "task weights must be positive" + ); + ( + ser(SequencingToMinimizeTardyTaskWeight::new( + lengths, weights, deadlines, + ))?, + resolved_variant.clone(), + ) + } + // SequencingToMinimizeWeightedCompletionTime "SequencingToMinimizeWeightedCompletionTime" => { let lengths_str = args.lengths.as_deref().ok_or_else(|| { diff --git a/src/models/misc/mod.rs b/src/models/misc/mod.rs index ead9c9bc..ccda3a4b 100644 --- a/src/models/misc/mod.rs +++ b/src/models/misc/mod.rs @@ -31,6 +31,7 @@ //! - [`SchedulingWithIndividualDeadlines`]: Meet per-task deadlines on parallel processors //! - [`StackerCrane`]: Minimize the total length of a closed walk through required arcs //! - [`SequencingToMinimizeMaximumCumulativeCost`]: Keep every cumulative schedule cost prefix under a bound +//! - [`SequencingToMinimizeTardyTaskWeight`]: Minimize total weight of tardy tasks //! - [`SequencingToMinimizeWeightedCompletionTime`]: Minimize total weighted completion time //! - [`SequencingToMinimizeWeightedTardiness`]: Decide whether a schedule meets a weighted tardiness bound //! - [`SequencingWithReleaseTimesAndDeadlines`]: Single-machine scheduling feasibility @@ -99,6 +100,7 @@ pub(crate) mod resource_constrained_scheduling; mod scheduling_to_minimize_weighted_completion_time; mod scheduling_with_individual_deadlines; mod sequencing_to_minimize_maximum_cumulative_cost; +mod sequencing_to_minimize_tardy_task_weight; mod sequencing_to_minimize_weighted_completion_time; mod sequencing_to_minimize_weighted_tardiness; mod sequencing_with_release_times_and_deadlines; @@ -147,6 +149,7 @@ pub use resource_constrained_scheduling::ResourceConstrainedScheduling; pub use scheduling_to_minimize_weighted_completion_time::SchedulingToMinimizeWeightedCompletionTime; pub use scheduling_with_individual_deadlines::SchedulingWithIndividualDeadlines; pub use sequencing_to_minimize_maximum_cumulative_cost::SequencingToMinimizeMaximumCumulativeCost; +pub use sequencing_to_minimize_tardy_task_weight::SequencingToMinimizeTardyTaskWeight; pub use sequencing_to_minimize_weighted_completion_time::SequencingToMinimizeWeightedCompletionTime; pub use sequencing_to_minimize_weighted_tardiness::SequencingToMinimizeWeightedTardiness; pub use sequencing_with_release_times_and_deadlines::SequencingWithReleaseTimesAndDeadlines; @@ -193,6 +196,7 @@ pub(crate) fn canonical_model_example_specs() -> Vec", description: "Processing time for each task" }, + FieldInfo { name: "weights", type_name: "Vec", description: "Weight w(t) for each task" }, + FieldInfo { name: "deadlines", type_name: "Vec", description: "Deadline d(t) for each task" }, + ], + } +} + +/// Sequencing to Minimize Tardy Task Weight problem. +/// +/// Given tasks with processing times `l(t)`, weights `w(t)`, and deadlines +/// `d(t)`, find a single-machine schedule that minimizes `sum_{t tardy} w(t)`, +/// where task `t` is tardy if its completion time `C(t) > d(t)`. +/// +/// This is the weighted generalization of minimizing the number of tardy tasks +/// (problem SS8 in Garey & Johnson, 1979, written $1 || sum w_j U_j$). +/// +/// Configurations are direct permutation encodings with `dims() = [n; n]`: +/// each position holds the index of the task scheduled at that position. +/// A configuration is valid iff it is a permutation of `0..n`. +#[derive(Debug, Clone, Serialize)] +pub struct SequencingToMinimizeTardyTaskWeight { + lengths: Vec, + weights: Vec, + deadlines: Vec, +} + +#[derive(Deserialize)] +struct SequencingToMinimizeTardyTaskWeightSerde { + lengths: Vec, + weights: Vec, + deadlines: Vec, +} + +impl SequencingToMinimizeTardyTaskWeight { + fn validate(lengths: &[u64], weights: &[u64], deadlines: &[u64]) -> Result<(), String> { + if lengths.len() != weights.len() { + return Err("lengths length must equal weights length".to_string()); + } + if lengths.len() != deadlines.len() { + return Err("lengths length must equal deadlines length".to_string()); + } + if lengths.contains(&0) { + return Err("task lengths must be positive".to_string()); + } + if weights.contains(&0) { + return Err("task weights must be positive".to_string()); + } + Ok(()) + } + + /// Create a new sequencing instance. + /// + /// # Panics + /// + /// Panics if `lengths`, `weights`, and `deadlines` are not all the same + /// length, or if any length or weight is zero. + pub fn new(lengths: Vec, weights: Vec, deadlines: Vec) -> Self { + Self::validate(&lengths, &weights, &deadlines).unwrap_or_else(|err| panic!("{err}")); + Self { + lengths, + weights, + deadlines, + } + } + + /// Returns the number of tasks. + pub fn num_tasks(&self) -> usize { + self.lengths.len() + } + + /// Returns the processing times. + pub fn lengths(&self) -> &[u64] { + &self.lengths + } + + /// Returns the task weights. + pub fn weights(&self) -> &[u64] { + &self.weights + } + + /// Returns the task deadlines. + pub fn deadlines(&self) -> &[u64] { + &self.deadlines + } + + /// Decode a direct permutation configuration. + /// + /// Returns the schedule as `Some(Vec)` if the config is a valid + /// permutation of `0..n`, or `None` otherwise. + fn decode_permutation(config: &[usize], n: usize) -> Option> { + if config.len() != n { + return None; + } + let mut seen = vec![false; n]; + for &task in config { + if task >= n || seen[task] { + return None; + } + seen[task] = true; + } + Some(config.to_vec()) + } + + fn tardy_task_weight(&self, schedule: &[usize]) -> Min { + let mut elapsed: u64 = 0; + let mut total: u64 = 0; + for &task in schedule { + elapsed = elapsed + .checked_add(self.lengths[task]) + .expect("total processing time overflowed u64"); + if elapsed > self.deadlines[task] { + total = total + .checked_add(self.weights[task]) + .expect("tardy task weight overflowed u64"); + } + } + Min(Some(total)) + } +} + +impl TryFrom for SequencingToMinimizeTardyTaskWeight { + type Error = String; + + fn try_from(value: SequencingToMinimizeTardyTaskWeightSerde) -> Result { + Self::validate(&value.lengths, &value.weights, &value.deadlines)?; + Ok(Self { + lengths: value.lengths, + weights: value.weights, + deadlines: value.deadlines, + }) + } +} + +impl<'de> Deserialize<'de> for SequencingToMinimizeTardyTaskWeight { + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + let value = SequencingToMinimizeTardyTaskWeightSerde::deserialize(deserializer)?; + Self::try_from(value).map_err(serde::de::Error::custom) + } +} + +impl Problem for SequencingToMinimizeTardyTaskWeight { + const NAME: &'static str = "SequencingToMinimizeTardyTaskWeight"; + type Value = Min; + + fn variant() -> Vec<(&'static str, &'static str)> { + crate::variant_params![] + } + + fn dims(&self) -> Vec { + let n = self.num_tasks(); + vec![n; n] + } + + fn evaluate(&self, config: &[usize]) -> Min { + let n = self.num_tasks(); + let Some(schedule) = Self::decode_permutation(config, n) else { + return Min(None); + }; + self.tardy_task_weight(&schedule) + } +} + +crate::declare_variants! { + default SequencingToMinimizeTardyTaskWeight => "factorial(num_tasks)", +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_model_example_specs() -> Vec { + vec![crate::example_db::specs::ModelExampleSpec { + id: "sequencing_to_minimize_tardy_task_weight", + // 5 tasks, lengths [3,2,4,1,2], weights [5,3,7,2,4], deadlines [6,4,10,2,8] + // Optimal schedule: [t4,t1,t5,t3,t2] = config [3,0,4,2,1] + // Start times: t4 starts 0, completes 1 (tardy: C=1 <= d=2, ok) + // t1 starts 1, completes 4 (tardy: C=4 <= d=6, ok) + // t5 starts 4, completes 6 (tardy: C=6 <= d=8, ok) + // t3 starts 6, completes 10 (tardy: C=10 <= d=10, ok) + // t2 starts 10, completes 12 (tardy: C=12 > d=4, tardy weight 3) + // Total tardy weight = 3 + instance: Box::new(SequencingToMinimizeTardyTaskWeight::new( + vec![3, 2, 4, 1, 2], + vec![5, 3, 7, 2, 4], + vec![6, 4, 10, 2, 8], + )), + optimal_config: vec![3, 0, 4, 2, 1], + optimal_value: serde_json::json!(3), + }] +} + +#[cfg(test)] +#[path = "../../unit_tests/models/misc/sequencing_to_minimize_tardy_task_weight.rs"] +mod tests; diff --git a/src/models/mod.rs b/src/models/mod.rs index 1faed96a..82d7a06e 100644 --- a/src/models/mod.rs +++ b/src/models/mod.rs @@ -46,11 +46,11 @@ pub use misc::{ PaintShop, Partition, PrecedenceConstrainedScheduling, ProductionPlanning, QueryArg, RectilinearPictureCompression, RegisterSufficiency, ResourceConstrainedScheduling, SchedulingToMinimizeWeightedCompletionTime, SchedulingWithIndividualDeadlines, - SequencingToMinimizeMaximumCumulativeCost, SequencingToMinimizeWeightedCompletionTime, - SequencingToMinimizeWeightedTardiness, SequencingWithReleaseTimesAndDeadlines, - SequencingWithinIntervals, ShortestCommonSupersequence, StackerCrane, StaffScheduling, - StringToStringCorrection, SubsetSum, SumOfSquaresPartition, Term, ThreePartition, - TimetableDesign, + SequencingToMinimizeMaximumCumulativeCost, SequencingToMinimizeTardyTaskWeight, + SequencingToMinimizeWeightedCompletionTime, SequencingToMinimizeWeightedTardiness, + SequencingWithReleaseTimesAndDeadlines, SequencingWithinIntervals, ShortestCommonSupersequence, + StackerCrane, StaffScheduling, StringToStringCorrection, SubsetSum, SumOfSquaresPartition, + Term, ThreePartition, TimetableDesign, }; pub use set::{ ComparativeContainment, ConsecutiveSets, ExactCoverBy3Sets, IntegerKnapsack, MaximumSetPacking, diff --git a/src/rules/mod.rs b/src/rules/mod.rs index a9a4cc73..1933cd14 100644 --- a/src/rules/mod.rs +++ b/src/rules/mod.rs @@ -214,6 +214,8 @@ pub(crate) mod schedulingwithindividualdeadlines_ilp; #[cfg(feature = "ilp-solver")] pub(crate) mod sequencingtominimizemaximumcumulativecost_ilp; #[cfg(feature = "ilp-solver")] +pub(crate) mod sequencingtominimizetardytaskweight_ilp; +#[cfg(feature = "ilp-solver")] pub(crate) mod sequencingtominimizeweightedcompletiontime_ilp; #[cfg(feature = "ilp-solver")] pub(crate) mod sequencingtominimizeweightedtardiness_ilp; @@ -381,6 +383,7 @@ pub(crate) fn canonical_rule_example_specs() -> Vec. +//! +//! Position-assignment ILP: binary x_{j,p} placing task j in position p, +//! with binary tardy indicator u_j. A big-M constraint forces u_j = 1 +//! whenever the completion time at position p exceeds the deadline d_j. + +use crate::models::algebraic::{LinearConstraint, ObjectiveSense, ILP}; +use crate::models::misc::SequencingToMinimizeTardyTaskWeight; +use crate::reduction; +use crate::rules::ilp_helpers::one_hot_decode; +use crate::rules::traits::{ReduceTo, ReductionResult}; + +/// Result of reducing SequencingToMinimizeTardyTaskWeight to ILP. +#[derive(Debug, Clone)] +pub struct ReductionSTMTTWToILP { + target: ILP, + num_tasks: usize, +} + +impl ReductionResult for ReductionSTMTTWToILP { + type Source = SequencingToMinimizeTardyTaskWeight; + type Target = ILP; + + fn target_problem(&self) -> &ILP { + &self.target + } + + fn extract_solution(&self, target_solution: &[usize]) -> Vec { + let n = self.num_tasks; + // Decode the n*n block of x_{j,p} variables into a schedule permutation. + // The source uses direct permutation encoding (config = schedule directly), + // so return the schedule as-is (it is already a permutation of 0..n). + one_hot_decode(target_solution, n, n, 0) + } +} + +#[reduction(overhead = { + num_vars = "num_tasks * num_tasks + num_tasks", + num_constraints = "2 * num_tasks + num_tasks * num_tasks", +})] +impl ReduceTo> for SequencingToMinimizeTardyTaskWeight { + type Result = ReductionSTMTTWToILP; + + fn reduce_to(&self) -> Self::Result { + let n = self.num_tasks(); + let num_x_vars = n * n; + let num_vars = num_x_vars + n; + let total_length: u64 = self.lengths().iter().copied().sum(); + let big_m = total_length as f64; + + let x_var = |j: usize, p: usize| -> usize { j * n + p }; + let u_var = |j: usize| -> usize { num_x_vars + j }; + + let mut constraints = Vec::new(); + + // 1. Each task assigned to exactly one position + for j in 0..n { + let terms: Vec<(usize, f64)> = (0..n).map(|p| (x_var(j, p), 1.0)).collect(); + constraints.push(LinearConstraint::eq(terms, 1.0)); + } + + // 2. Each position has exactly one task + for p in 0..n { + let terms: Vec<(usize, f64)> = (0..n).map(|j| (x_var(j, p), 1.0)).collect(); + constraints.push(LinearConstraint::eq(terms, 1.0)); + } + + // 3. Tardy indicator: for each (j, p), if x_{j,p}=1 then + // completion_time_at_p >= l_j + sum_{p' < p} sum_{j'} l_{j'} * x_{j',p'} + // If completion > d_j then u_j must be 1. + // Linearized as: big_m * x_{j,p} + sum_{p' = Vec::new(); + terms.push((x_var(j, p), big_m)); + for pp in 0..p { + for (jj, &len) in lengths.iter().enumerate() { + terms.push((x_var(jj, pp), len as f64)); + } + } + terms.push((u_var(j), -big_m)); + let rhs = self.deadlines()[j] as f64 - lengths[j] as f64 + big_m; + constraints.push(LinearConstraint::le(terms, rhs)); + } + } + + // Objective: minimize sum w_j * u_j + let weights = self.weights(); + let objective: Vec<(usize, f64)> = (0..n).map(|j| (u_var(j), weights[j] as f64)).collect(); + + ReductionSTMTTWToILP { + target: ILP::new(num_vars, constraints, objective, ObjectiveSense::Minimize), + num_tasks: n, + } + } +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_rule_example_specs() -> Vec { + vec![crate::example_db::specs::RuleExampleSpec { + id: "sequencingtominimizetardytaskweight_to_ilp", + build: || { + let source = SequencingToMinimizeTardyTaskWeight::new( + vec![3, 2, 4, 1, 2], + vec![5, 3, 7, 2, 4], + vec![6, 4, 10, 2, 8], + ); + crate::example_db::specs::rule_example_via_ilp::<_, bool>(source) + }, + }] +} + +#[cfg(test)] +#[path = "../unit_tests/rules/sequencingtominimizetardytaskweight_ilp.rs"] +mod tests; diff --git a/src/unit_tests/models/misc/sequencing_to_minimize_tardy_task_weight.rs b/src/unit_tests/models/misc/sequencing_to_minimize_tardy_task_weight.rs new file mode 100644 index 00000000..023b18c7 --- /dev/null +++ b/src/unit_tests/models/misc/sequencing_to_minimize_tardy_task_weight.rs @@ -0,0 +1,198 @@ +use super::*; +use crate::solvers::BruteForce; +use crate::traits::Problem; +use crate::types::Min; + +#[test] +fn test_sequencing_to_minimize_tardy_task_weight_basic() { + let problem = SequencingToMinimizeTardyTaskWeight::new( + vec![3, 2, 4, 1, 2], + vec![5, 3, 7, 2, 4], + vec![6, 4, 10, 2, 8], + ); + + assert_eq!(problem.num_tasks(), 5); + assert_eq!(problem.lengths(), &[3, 2, 4, 1, 2]); + assert_eq!(problem.weights(), &[5, 3, 7, 2, 4]); + assert_eq!(problem.deadlines(), &[6, 4, 10, 2, 8]); + assert_eq!(problem.dims(), vec![5, 5, 5, 5, 5]); + assert_eq!( + ::NAME, + "SequencingToMinimizeTardyTaskWeight" + ); + assert_eq!( + ::variant(), + vec![] + ); +} + +#[test] +fn test_sequencing_to_minimize_tardy_task_weight_evaluate_issue_example() { + let problem = SequencingToMinimizeTardyTaskWeight::new( + vec![3, 2, 4, 1, 2], + vec![5, 3, 7, 2, 4], + vec![6, 4, 10, 2, 8], + ); + + // Schedule [3,0,4,2,1] = t3,t0,t4,t2,t1 + // t3: completes at 1, deadline=2, on time + // t0: completes at 1+3=4, deadline=6, on time + // t4: completes at 4+2=6, deadline=8, on time + // t2: completes at 6+4=10, deadline=10, on time + // t1: completes at 10+2=12, deadline=4, TARDY weight=3 + // Total = 3 + assert_eq!(problem.evaluate(&[3, 0, 4, 2, 1]), Min(Some(3))); +} + +#[test] +fn test_sequencing_to_minimize_tardy_task_weight_evaluate_all_on_time() { + // Single task with generous deadline + let problem = SequencingToMinimizeTardyTaskWeight::new(vec![2, 3], vec![5, 4], vec![10, 10]); + // Both orders: no task is tardy + assert_eq!(problem.evaluate(&[0, 1]), Min(Some(0))); + assert_eq!(problem.evaluate(&[1, 0]), Min(Some(0))); +} + +#[test] +fn test_sequencing_to_minimize_tardy_task_weight_evaluate_all_tardy() { + // Tight deadlines: every task is tardy regardless of order (3 tasks, total length=9) + let problem = + SequencingToMinimizeTardyTaskWeight::new(vec![3, 3, 3], vec![1, 2, 3], vec![2, 2, 2]); + // [0,1,2]: t0 completes 3>2 tardy(1), t1 completes 6>2 tardy(2), t2 completes 9>2 tardy(3) = 6 + assert_eq!(problem.evaluate(&[0, 1, 2]), Min(Some(6))); +} + +#[test] +fn test_sequencing_to_minimize_tardy_task_weight_evaluate_invalid_config() { + let problem = + SequencingToMinimizeTardyTaskWeight::new(vec![2, 3, 1], vec![1, 2, 3], vec![5, 6, 7]); + + // Wrong length + assert_eq!(problem.evaluate(&[0, 1]), Min(None)); + assert_eq!(problem.evaluate(&[0, 1, 2, 0]), Min(None)); + // Not a permutation (duplicate) + assert_eq!(problem.evaluate(&[0, 0, 1]), Min(None)); + // Out of range + assert_eq!(problem.evaluate(&[0, 1, 3]), Min(None)); +} + +#[test] +fn test_sequencing_to_minimize_tardy_task_weight_brute_force_small() { + // 3 tasks so brute force is fast (3^3 = 27 configs) + let problem = + SequencingToMinimizeTardyTaskWeight::new(vec![3, 2, 1], vec![4, 2, 3], vec![4, 3, 6]); + let solver = BruteForce::new(); + let solution = solver + .find_witness(&problem) + .expect("should find a solution"); + let value = problem.evaluate(&solution); + assert!(value.is_valid()); + + // Check it's truly optimal by brute-forcing all permutations + let permutations: Vec> = vec![ + vec![0, 1, 2], + vec![0, 2, 1], + vec![1, 0, 2], + vec![1, 2, 0], + vec![2, 0, 1], + vec![2, 1, 0], + ]; + let best = permutations + .iter() + .filter_map(|perm| problem.evaluate(perm).0) + .min() + .unwrap(); + assert_eq!(value, Min(Some(best))); +} + +#[test] +fn test_sequencing_to_minimize_tardy_task_weight_paper_example() { + let problem = SequencingToMinimizeTardyTaskWeight::new( + vec![3, 2, 4, 1, 2], + vec![5, 3, 7, 2, 4], + vec![6, 4, 10, 2, 8], + ); + let expected_config = vec![3, 0, 4, 2, 1]; + assert_eq!(problem.evaluate(&expected_config), Min(Some(3))); + + let solver = BruteForce::new(); + let solution = solver + .find_witness(&problem) + .expect("should find a solution"); + assert_eq!(problem.evaluate(&solution), Min(Some(3))); +} + +#[test] +fn test_sequencing_to_minimize_tardy_task_weight_serialization() { + let problem = + SequencingToMinimizeTardyTaskWeight::new(vec![3, 2, 1], vec![4, 2, 3], vec![4, 3, 6]); + let json = serde_json::to_value(&problem).unwrap(); + let restored: SequencingToMinimizeTardyTaskWeight = serde_json::from_value(json).unwrap(); + + assert_eq!(restored.lengths(), problem.lengths()); + assert_eq!(restored.weights(), problem.weights()); + assert_eq!(restored.deadlines(), problem.deadlines()); +} + +#[test] +fn test_sequencing_to_minimize_tardy_task_weight_deserialization_rejects_zero_length() { + let err = serde_json::from_value::(serde_json::json!({ + "lengths": [0, 1, 3], + "weights": [1, 2, 3], + "deadlines": [5, 5, 5], + })) + .unwrap_err(); + assert!(err.to_string().contains("task lengths must be positive")); +} + +#[test] +fn test_sequencing_to_minimize_tardy_task_weight_deserialization_rejects_zero_weight() { + let err = serde_json::from_value::(serde_json::json!({ + "lengths": [1, 2, 3], + "weights": [0, 2, 3], + "deadlines": [5, 5, 5], + })) + .unwrap_err(); + assert!(err.to_string().contains("task weights must be positive")); +} + +#[test] +fn test_sequencing_to_minimize_tardy_task_weight_single_task() { + let problem = SequencingToMinimizeTardyTaskWeight::new(vec![3], vec![2], vec![5]); + assert_eq!(problem.dims(), vec![1]); + // completes at 3, deadline 5, on time + assert_eq!(problem.evaluate(&[0]), Min(Some(0))); +} + +#[test] +fn test_sequencing_to_minimize_tardy_task_weight_single_task_tardy() { + let problem = SequencingToMinimizeTardyTaskWeight::new(vec![3], vec![2], vec![2]); + // completes at 3, deadline 2, tardy, weight 2 + assert_eq!(problem.evaluate(&[0]), Min(Some(2))); +} + +#[test] +fn test_sequencing_to_minimize_tardy_task_weight_empty() { + let problem = SequencingToMinimizeTardyTaskWeight::new(vec![], vec![], vec![]); + assert_eq!(problem.num_tasks(), 0); + assert_eq!(problem.dims(), Vec::::new()); + assert_eq!(problem.evaluate(&[]), Min(Some(0))); +} + +#[test] +#[should_panic(expected = "lengths length must equal weights length")] +fn test_sequencing_to_minimize_tardy_task_weight_mismatched_lengths_weights() { + SequencingToMinimizeTardyTaskWeight::new(vec![2, 1], vec![3], vec![5, 5]); +} + +#[test] +#[should_panic(expected = "lengths length must equal deadlines length")] +fn test_sequencing_to_minimize_tardy_task_weight_mismatched_lengths_deadlines() { + SequencingToMinimizeTardyTaskWeight::new(vec![2, 1], vec![3, 4], vec![5]); +} + +#[test] +#[should_panic(expected = "task lengths must be positive")] +fn test_sequencing_to_minimize_tardy_task_weight_zero_length() { + SequencingToMinimizeTardyTaskWeight::new(vec![0, 1], vec![2, 3], vec![5, 5]); +} diff --git a/src/unit_tests/rules/sequencingtominimizetardytaskweight_ilp.rs b/src/unit_tests/rules/sequencingtominimizetardytaskweight_ilp.rs new file mode 100644 index 00000000..71199ad2 --- /dev/null +++ b/src/unit_tests/rules/sequencingtominimizetardytaskweight_ilp.rs @@ -0,0 +1,84 @@ +use super::*; +use crate::models::algebraic::ILP; +use crate::rules::test_helpers::assert_optimization_round_trip_from_optimization_target; +use crate::solvers::{BruteForce, ILPSolver}; +use crate::traits::Problem; + +#[test] +fn test_sequencingtominimizetardytaskweight_to_ilp_closed_loop() { + let problem = + SequencingToMinimizeTardyTaskWeight::new(vec![3, 2, 1], vec![4, 2, 3], vec![4, 3, 6]); + let reduction = ReduceTo::>::reduce_to(&problem); + + assert_optimization_round_trip_from_optimization_target( + &problem, + &reduction, + "SequencingToMinimizeTardyTaskWeight->ILP closed loop", + ); +} + +#[test] +fn test_sequencingtominimizetardytaskweight_to_ilp_bf_vs_ilp() { + let problem = SequencingToMinimizeTardyTaskWeight::new( + vec![3, 2, 4, 1, 2], + vec![5, 3, 7, 2, 4], + vec![6, 4, 10, 2, 8], + ); + + let bf = BruteForce::new(); + let bf_witness = bf.find_witness(&problem).expect("should find a solution"); + let bf_value = problem.evaluate(&bf_witness); + + let reduction = ReduceTo::>::reduce_to(&problem); + let ilp_solution = ILPSolver::new() + .solve(reduction.target_problem()) + .expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + let ilp_value = problem.evaluate(&extracted); + + assert_eq!(bf_value, ilp_value); + assert_eq!(ilp_value.0, Some(3)); +} + +#[test] +fn test_sequencingtominimizetardytaskweight_to_ilp_all_on_time() { + let problem = + SequencingToMinimizeTardyTaskWeight::new(vec![1, 1, 1], vec![2, 3, 4], vec![10, 10, 10]); + let reduction = ReduceTo::>::reduce_to(&problem); + + let ilp_solution = ILPSolver::new() + .solve(reduction.target_problem()) + .expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + let value = problem.evaluate(&extracted); + assert!(value.is_valid()); + assert_eq!(value.0, Some(0)); +} + +#[test] +fn test_sequencingtominimizetardytaskweight_to_ilp_optimal_ordering() { + // 3 tasks where order matters: + // t0: length=4, weight=5, deadline=4 + // t1: length=1, weight=1, deadline=5 + // t2: length=2, weight=3, deadline=3 + // Best schedule: [2,0,1] -> t2 completes 2 (ok), t0 completes 6 (tardy wt=5), t1 completes 7 (tardy wt=1) + // or: [2,1,0] -> t2 completes 2 (ok), t1 completes 3 (ok), t0 completes 7 (tardy wt=5) + // or: [1,2,0] -> t1 completes 1 (ok), t2 completes 3 (ok), t0 completes 7 (tardy wt=5) + // or: [0,1,2] -> t0 completes 4 (ok), t1 completes 5 (ok), t2 completes 7 (tardy wt=3) = 3 + // Minimum is 3 (schedule [0,1,2]) + let problem = + SequencingToMinimizeTardyTaskWeight::new(vec![4, 1, 2], vec![5, 1, 3], vec![4, 5, 3]); + let reduction = ReduceTo::>::reduce_to(&problem); + + let ilp_solution = ILPSolver::new() + .solve(reduction.target_problem()) + .expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + let ilp_value = problem.evaluate(&extracted); + + let bf = BruteForce::new(); + let bf_witness = bf.find_witness(&problem).expect("should have solution"); + let bf_value = problem.evaluate(&bf_witness); + + assert_eq!(ilp_value, bf_value); +} From 67ce930fa23ed6abc41caebfec12845c50eb0b40 Mon Sep 17 00:00:00 2001 From: Xiwei Pan Date: Mon, 30 Mar 2026 05:02:57 +0800 Subject: [PATCH 04/21] feat: add SequencingWithDeadlinesAndSetUpTimes model (#499) Implement the scheduling feasibility problem with compiler-class setup times (GJ SS6) with direct ILP reduction, CLI support, and paper entry. Co-Authored-By: Claude Opus 4.6 (1M context) --- docs/paper/reductions.typ | 62 +++++ problemreductions-cli/src/cli.rs | 7 + problemreductions-cli/src/commands/create.rs | 73 ++++- src/models/misc/mod.rs | 4 + ...uencing_with_deadlines_and_set_up_times.rs | 263 ++++++++++++++++++ src/rules/mod.rs | 3 + ...equencingwithdeadlinesandsetuptimes_ilp.rs | 224 +++++++++++++++ ...uencing_with_deadlines_and_set_up_times.rs | 206 ++++++++++++++ ...equencingwithdeadlinesandsetuptimes_ilp.rs | 121 ++++++++ 9 files changed, 961 insertions(+), 2 deletions(-) create mode 100644 src/models/misc/sequencing_with_deadlines_and_set_up_times.rs create mode 100644 src/rules/sequencingwithdeadlinesandsetuptimes_ilp.rs create mode 100644 src/unit_tests/models/misc/sequencing_with_deadlines_and_set_up_times.rs create mode 100644 src/unit_tests/rules/sequencingwithdeadlinesandsetuptimes_ilp.rs diff --git a/docs/paper/reductions.typ b/docs/paper/reductions.typ index 8509a419..07d04735 100644 --- a/docs/paper/reductions.typ +++ b/docs/paper/reductions.typ @@ -191,6 +191,7 @@ "SequencingToMinimizeTardyTaskWeight": [Sequencing to Minimize Tardy Task Weight], "SequencingToMinimizeWeightedCompletionTime": [Sequencing to Minimize Weighted Completion Time], "SequencingToMinimizeWeightedTardiness": [Sequencing to Minimize Weighted Tardiness], + "SequencingWithDeadlinesAndSetUpTimes": [Sequencing with Deadlines and Set-Up Times], "SequencingWithReleaseTimesAndDeadlines": [Sequencing with Release Times and Deadlines], "SequencingWithinIntervals": [Sequencing Within Intervals], "ShortestCommonSupersequence": [Shortest Common Supersequence], @@ -6472,6 +6473,47 @@ A classical NP-complete problem from Garey and Johnson @garey1979[Ch.~3, p.~76], ] } +#{ + let x = load-model-example("SequencingWithDeadlinesAndSetUpTimes") + let lengths = x.instance.lengths + let deadlines = x.instance.deadlines + let compilers = x.instance.compilers + let setup_times = x.instance.setup_times + let ntasks = lengths.len() + let schedule = x.optimal_config + let completions = { + let t = 0 + let prev_compiler = none + let result = () + for task in schedule { + if prev_compiler != none and prev_compiler != compilers.at(task) { + t += setup_times.at(compilers.at(task)) + } + t += lengths.at(task) + result.push(t) + prev_compiler = compilers.at(task) + } + result + } + [ + #problem-def("SequencingWithDeadlinesAndSetUpTimes")[ + Given a set $T$ of $n$ tasks, a processing-time function $ell: T -> ZZ^+$, a deadline function $d: T -> ZZ^+$, a compiler assignment $k: T -> C$ for a finite set $C$ of compilers, and a setup-time function $s: C -> ZZ_(>=0)$, determine whether there exists a single-machine schedule such that every task $t$ completes by its deadline $d(t)$, where an additional setup time $s(k(t))$ is charged before $t$ whenever $k(t) != k(t')$ for the immediately preceding task $t'$. + ][ + Sequencing with Deadlines and Set-Up Times is problem SS14 in Garey & Johnson @garey1979, usually written $1 | s_(i j) | "feasibility"$. The problem is NP-complete even when all setup times are equal. It generalises Sequencing with Release Times and Deadlines (SS13) by replacing release-time windows with compiler-switch penalties. + + Configurations are direct permutation encodings: the config vector $(sigma_0, dots, sigma_(n-1))$ specifies which task occupies each position, and a configuration is valid iff it is a permutation of $\{0, dots, n-1\}$. + + *Example.* Consider $n = #ntasks$ tasks with lengths $ell = (#lengths.map(v => str(v)).join(", "))$, deadlines $d = (#deadlines.map(v => str(v)).join(", "))$, compilers $k = (#compilers.map(v => str(v)).join(", "))$, and setup times $s = (#setup_times.map(v => str(v)).join(", "))$. The schedule $(#schedule.map(t => $t_(#(t + 1))$).join(", "))$ achieves completion times $(#completions.map(v => str(v)).join(", "))$; every task meets its deadline, so the instance is feasible. + + #pred-commands( + "pred create --example SequencingWithDeadlinesAndSetUpTimes -o sequencing-with-deadlines-and-set-up-times.json", + "pred solve sequencing-with-deadlines-and-set-up-times.json", + "pred evaluate sequencing-with-deadlines-and-set-up-times.json --config " + x.optimal_config.map(str).join(","), + ) + ] + ] +} + #{ let x = load-model-example("IntegralFlowHomologousArcs") let arcs = x.instance.graph.arcs @@ -9524,6 +9566,26 @@ The following reductions to Integer Linear Programming are straightforward formu _Solution extraction._ Read the unique position $p$ with $x_(j,p) = 1$ for each task $j$ to recover the schedule permutation. ] +#reduction-rule("SequencingWithDeadlinesAndSetUpTimes", "ILP")[ + Assign tasks to positions with switch-detection auxiliaries that gate per-compiler setup costs into the deadline constraints. +][ + _Construction._ Let $n$ be the number of tasks. Variables: binary $x_(j,p)$ with $x_(j,p) = 1$ iff task $j$ occupies position $p$; binary $"sw"_p$ for $p >= 1$ indicating a compiler switch before position $p$; binary $a_(j,p) = x_(j,p) dot "sw"_p$ (linearised product). Let $M = sum_j ell_j + max_c s(c) dot (n-1)$. The ILP is: + $ + "find" quad & bold(x) \ + "subject to" quad & sum_p x_(j,p) = 1 quad forall j \ + & sum_j x_(j,p) = 1 quad forall p \ + & x_(j,p) + x_(j',p-1) - "sw"_p <= 1 quad forall p >= 1, j, j' : k(j) != k(j') \ + & a_(j,p) <= x_(j,p), quad a_(j,p) <= "sw"_p, quad x_(j,p) + "sw"_p - a_(j,p) <= 1 quad forall j, p >= 1 \ + & M x_(j,p) + sum_(p' < p) sum_(j') ell_(j') x_(j',p') + sum_(p'=1)^(p) sum_(j') s(k(j')) a_(j',p') <= d_j - ell_j + M quad forall j, p \ + & x_(j,p), "sw"_p, a_(j,p) in {0, 1} + $. + The switch-detection row forces $"sw"_p = 1$ whenever the tasks at positions $p-1$ and $p$ use different compilers. The $a_(j,p)$ linearisation then routes the correct per-compiler setup time into the completion-time bound for each position. + + _Correctness._ ($arrow.r.double$) Any feasible schedule assigns each task to a position; the switch indicator equals one exactly when consecutive compilers differ, and the deadline constraint is satisfied by hypothesis. ($arrow.l.double$) Any feasible ILP solution is a valid permutation and the deadline bound ensures each task finishes on time accounting for all setup penalties. + + _Solution extraction._ Read the unique position $p$ with $x_(j,p) = 1$ for each task $j$ to recover the schedule permutation. +] + #reduction-rule("SequencingToMinimizeWeightedTardiness", "ILP")[ Encode the single-machine order with pairwise precedence bits and completion times, then linearize the weighted tardiness bound with nonnegative tardiness variables. ][ diff --git a/problemreductions-cli/src/cli.rs b/problemreductions-cli/src/cli.rs index 226c56d0..fbd0d5ae 100644 --- a/problemreductions-cli/src/cli.rs +++ b/problemreductions-cli/src/cli.rs @@ -315,6 +315,7 @@ Flags by problem type: SequencingToMinimizeTardyTaskWeight --sizes, --weights, --deadlines SequencingToMinimizeWeightedCompletionTime --lengths, --weights [--precedence-pairs] SequencingToMinimizeWeightedTardiness --sizes, --weights, --deadlines, --bound + SequencingWithDeadlinesAndSetUpTimes --sizes, --deadlines, --compilers, --setup-times MinimumExternalMacroDataCompression --string, --pointer-cost [--alphabet-size] MinimumInternalMacroDataCompression --string, --pointer-cost [--alphabet-size] SCS --strings [--alphabet-size] @@ -757,6 +758,12 @@ pub struct CreateArgs { /// Number of sectors for ExpectedRetrievalCost #[arg(long)] pub num_sectors: Option, + /// Compiler index for each task in SequencingWithDeadlinesAndSetUpTimes (comma-separated, e.g., "0,1,0,1,0") + #[arg(long)] + pub compilers: Option, + /// Setup times per compiler for SequencingWithDeadlinesAndSetUpTimes (comma-separated, e.g., "1,2") + #[arg(long)] + pub setup_times: Option, /// Source string for StringToStringCorrection (comma-separated symbol indices, e.g., "0,1,2,3") #[arg(long)] pub source_string: Option, diff --git a/problemreductions-cli/src/commands/create.rs b/problemreductions-cli/src/commands/create.rs index b73fc011..2e061b0d 100644 --- a/problemreductions-cli/src/commands/create.rs +++ b/problemreductions-cli/src/commands/create.rs @@ -32,8 +32,9 @@ use problemreductions::models::misc::{ SchedulingToMinimizeWeightedCompletionTime, SchedulingWithIndividualDeadlines, SequencingToMinimizeMaximumCumulativeCost, SequencingToMinimizeTardyTaskWeight, SequencingToMinimizeWeightedCompletionTime, SequencingToMinimizeWeightedTardiness, - SequencingWithReleaseTimesAndDeadlines, SequencingWithinIntervals, ShortestCommonSupersequence, - StringToStringCorrection, SubsetSum, SumOfSquaresPartition, ThreePartition, TimetableDesign, + SequencingWithDeadlinesAndSetUpTimes, SequencingWithReleaseTimesAndDeadlines, + SequencingWithinIntervals, ShortestCommonSupersequence, StringToStringCorrection, SubsetSum, + SumOfSquaresPartition, ThreePartition, TimetableDesign, }; use problemreductions::models::BiconnectivityAugmentation; use problemreductions::prelude::*; @@ -205,6 +206,8 @@ fn all_data_flags_empty(args: &CreateArgs) -> bool { && args.rhs.is_none() && args.coeff_c.is_none() && args.required_columns.is_none() + && args.compilers.is_none() + && args.setup_times.is_none() } fn emit_problem_output(output: &ProblemJsonOutput, out: &OutputConfig) -> Result<()> { @@ -3736,6 +3739,70 @@ pub fn create(args: &CreateArgs, out: &OutputConfig) -> Result<()> { ) } + // SequencingWithDeadlinesAndSetUpTimes + "SequencingWithDeadlinesAndSetUpTimes" => { + let sizes_str = args.sizes.as_deref().ok_or_else(|| { + anyhow::anyhow!( + "SequencingWithDeadlinesAndSetUpTimes requires --sizes, --deadlines, --compilers, and --setup-times\n\n\ + Usage: pred create SequencingWithDeadlinesAndSetUpTimes --sizes 2,3,1,2,2 --deadlines 4,11,3,16,7 --compilers 0,1,0,1,0 --setup-times 1,2" + ) + })?; + let deadlines_str = args.deadlines.as_deref().ok_or_else(|| { + anyhow::anyhow!( + "SequencingWithDeadlinesAndSetUpTimes requires --deadlines\n\n\ + Usage: pred create SequencingWithDeadlinesAndSetUpTimes --sizes 2,3,1,2,2 --deadlines 4,11,3,16,7 --compilers 0,1,0,1,0 --setup-times 1,2" + ) + })?; + let compilers_str = args.compilers.as_deref().ok_or_else(|| { + anyhow::anyhow!( + "SequencingWithDeadlinesAndSetUpTimes requires --compilers\n\n\ + Usage: pred create SequencingWithDeadlinesAndSetUpTimes --sizes 2,3,1,2,2 --deadlines 4,11,3,16,7 --compilers 0,1,0,1,0 --setup-times 1,2" + ) + })?; + let setup_times_str = args.setup_times.as_deref().ok_or_else(|| { + anyhow::anyhow!( + "SequencingWithDeadlinesAndSetUpTimes requires --setup-times\n\n\ + Usage: pred create SequencingWithDeadlinesAndSetUpTimes --sizes 2,3,1,2,2 --deadlines 4,11,3,16,7 --compilers 0,1,0,1,0 --setup-times 1,2" + ) + })?; + let lengths: Vec = util::parse_comma_list(sizes_str)?; + let deadlines: Vec = util::parse_comma_list(deadlines_str)?; + let compilers: Vec = util::parse_comma_list(compilers_str)?; + let setup_times: Vec = util::parse_comma_list(setup_times_str)?; + anyhow::ensure!( + lengths.len() == deadlines.len(), + "lengths length ({}) must equal deadlines length ({})", + lengths.len(), + deadlines.len() + ); + anyhow::ensure!( + lengths.len() == compilers.len(), + "lengths length ({}) must equal compilers length ({})", + lengths.len(), + compilers.len() + ); + anyhow::ensure!( + lengths.iter().all(|&l| l > 0), + "task lengths must be positive" + ); + let num_compilers = setup_times.len(); + for &c in &compilers { + anyhow::ensure!( + c < num_compilers, + "compiler index {c} is out of range for setup_times of length {num_compilers}" + ); + } + ( + ser(SequencingWithDeadlinesAndSetUpTimes::new( + lengths, + deadlines, + compilers, + setup_times, + ))?, + resolved_variant.clone(), + ) + } + // SequencingToMinimizeWeightedCompletionTime "SequencingToMinimizeWeightedCompletionTime" => { let lengths_str = args.lengths.as_deref().ok_or_else(|| { @@ -8060,6 +8127,8 @@ mod tests { rhs: None, coeff_c: None, required_columns: None, + compilers: None, + setup_times: None, } } diff --git a/src/models/misc/mod.rs b/src/models/misc/mod.rs index ccda3a4b..a34a5d51 100644 --- a/src/models/misc/mod.rs +++ b/src/models/misc/mod.rs @@ -34,6 +34,7 @@ //! - [`SequencingToMinimizeTardyTaskWeight`]: Minimize total weight of tardy tasks //! - [`SequencingToMinimizeWeightedCompletionTime`]: Minimize total weighted completion time //! - [`SequencingToMinimizeWeightedTardiness`]: Decide whether a schedule meets a weighted tardiness bound +//! - [`SequencingWithDeadlinesAndSetUpTimes`]: Single-machine scheduling feasibility with compiler-switch setup penalties //! - [`SequencingWithReleaseTimesAndDeadlines`]: Single-machine scheduling feasibility //! - [`SequencingWithinIntervals`]: Schedule tasks within time windows //! - [`ShortestCommonSupersequence`]: Find a common supersequence of bounded length @@ -103,6 +104,7 @@ mod sequencing_to_minimize_maximum_cumulative_cost; mod sequencing_to_minimize_tardy_task_weight; mod sequencing_to_minimize_weighted_completion_time; mod sequencing_to_minimize_weighted_tardiness; +mod sequencing_with_deadlines_and_set_up_times; mod sequencing_with_release_times_and_deadlines; mod sequencing_within_intervals; pub(crate) mod shortest_common_supersequence; @@ -152,6 +154,7 @@ pub use sequencing_to_minimize_maximum_cumulative_cost::SequencingToMinimizeMaxi pub use sequencing_to_minimize_tardy_task_weight::SequencingToMinimizeTardyTaskWeight; pub use sequencing_to_minimize_weighted_completion_time::SequencingToMinimizeWeightedCompletionTime; pub use sequencing_to_minimize_weighted_tardiness::SequencingToMinimizeWeightedTardiness; +pub use sequencing_with_deadlines_and_set_up_times::SequencingWithDeadlinesAndSetUpTimes; pub use sequencing_with_release_times_and_deadlines::SequencingWithReleaseTimesAndDeadlines; pub use sequencing_within_intervals::SequencingWithinIntervals; pub use shortest_common_supersequence::ShortestCommonSupersequence; @@ -197,6 +200,7 @@ pub(crate) fn canonical_model_example_specs() -> Vec", description: "Processing time for each task" }, + FieldInfo { name: "deadlines", type_name: "Vec", description: "Deadline d(t) for each task" }, + FieldInfo { name: "compilers", type_name: "Vec", description: "Compiler index k(t) for each task" }, + FieldInfo { name: "setup_times", type_name: "Vec", description: "Setup time s(c) charged when switching away from compiler c" }, + ], + } +} + +/// Sequencing with Deadlines and Set-Up Times problem. +/// +/// Given tasks with processing times `l(t)`, deadlines `d(t)`, compiler +/// assignments `k(t)`, and per-compiler setup times `s(c)`, find a +/// single-machine schedule in which all tasks meet their deadlines, where a +/// setup penalty `s(k(t'))` is added before any task `t` that uses a +/// different compiler than the immediately preceding task `t'`. +/// +/// This is problem SS14 in Garey & Johnson (1979), written +/// $1 | s_{ij} | \text{feasibility}$. +/// +/// Configurations are direct permutation encodings with `dims() = [n; n]`: +/// each position holds the index of the task scheduled at that position. +/// A configuration is valid iff it is a permutation of `0..n`. +#[derive(Debug, Clone, Serialize)] +pub struct SequencingWithDeadlinesAndSetUpTimes { + lengths: Vec, + deadlines: Vec, + compilers: Vec, + setup_times: Vec, +} + +#[derive(Deserialize)] +struct SequencingWithDeadlinesAndSetUpTimesSerde { + lengths: Vec, + deadlines: Vec, + compilers: Vec, + setup_times: Vec, +} + +impl SequencingWithDeadlinesAndSetUpTimes { + fn validate( + lengths: &[u64], + deadlines: &[u64], + compilers: &[usize], + setup_times: &[u64], + ) -> Result<(), String> { + if lengths.len() != deadlines.len() { + return Err("lengths length must equal deadlines length".to_string()); + } + if lengths.len() != compilers.len() { + return Err("lengths length must equal compilers length".to_string()); + } + if lengths.contains(&0) { + return Err("task lengths must be positive".to_string()); + } + let num_compilers = setup_times.len(); + for &c in compilers { + if c >= num_compilers { + return Err(format!( + "compiler index {c} is out of range for setup_times of length {num_compilers}" + )); + } + } + Ok(()) + } + + /// Create a new sequencing instance. + /// + /// # Panics + /// + /// Panics if the input vectors are inconsistent or contain invalid values. + pub fn new( + lengths: Vec, + deadlines: Vec, + compilers: Vec, + setup_times: Vec, + ) -> Self { + Self::validate(&lengths, &deadlines, &compilers, &setup_times) + .unwrap_or_else(|err| panic!("{err}")); + Self { + lengths, + deadlines, + compilers, + setup_times, + } + } + + /// Returns the number of tasks. + pub fn num_tasks(&self) -> usize { + self.lengths.len() + } + + /// Returns the number of distinct compilers (= `setup_times.len()`). + pub fn num_compilers(&self) -> usize { + self.setup_times.len() + } + + /// Returns the processing times. + pub fn lengths(&self) -> &[u64] { + &self.lengths + } + + /// Returns the task deadlines. + pub fn deadlines(&self) -> &[u64] { + &self.deadlines + } + + /// Returns the compiler index for each task. + pub fn compilers(&self) -> &[usize] { + &self.compilers + } + + /// Returns the per-compiler setup times. + pub fn setup_times(&self) -> &[u64] { + &self.setup_times + } + + /// Decode a direct permutation configuration. + /// + /// Returns `Some(schedule)` if the config is a valid permutation of `0..n`, + /// or `None` otherwise. + fn decode_permutation(config: &[usize], n: usize) -> Option> { + if config.len() != n { + return None; + } + let mut seen = vec![false; n]; + for &task in config { + if task >= n || seen[task] { + return None; + } + seen[task] = true; + } + Some(config.to_vec()) + } + + /// Check whether a schedule meets all deadlines. + /// + /// Returns `true` iff every task in the schedule completes by its deadline. + fn all_deadlines_met(&self, schedule: &[usize]) -> bool { + let mut elapsed: u64 = 0; + let mut prev_compiler: Option = None; + for &task in schedule { + // Add setup time if the compiler switches. + if let Some(prev) = prev_compiler { + if prev != self.compilers[task] { + elapsed = elapsed + .checked_add(self.setup_times[self.compilers[task]]) + .expect("elapsed time overflowed u64"); + } + } + elapsed = elapsed + .checked_add(self.lengths[task]) + .expect("elapsed time overflowed u64"); + if elapsed > self.deadlines[task] { + return false; + } + prev_compiler = Some(self.compilers[task]); + } + true + } +} + +impl TryFrom for SequencingWithDeadlinesAndSetUpTimes { + type Error = String; + + fn try_from(value: SequencingWithDeadlinesAndSetUpTimesSerde) -> Result { + Self::validate( + &value.lengths, + &value.deadlines, + &value.compilers, + &value.setup_times, + )?; + Ok(Self { + lengths: value.lengths, + deadlines: value.deadlines, + compilers: value.compilers, + setup_times: value.setup_times, + }) + } +} + +impl<'de> Deserialize<'de> for SequencingWithDeadlinesAndSetUpTimes { + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + let value = SequencingWithDeadlinesAndSetUpTimesSerde::deserialize(deserializer)?; + Self::try_from(value).map_err(serde::de::Error::custom) + } +} + +impl Problem for SequencingWithDeadlinesAndSetUpTimes { + const NAME: &'static str = "SequencingWithDeadlinesAndSetUpTimes"; + type Value = Or; + + fn variant() -> Vec<(&'static str, &'static str)> { + crate::variant_params![] + } + + fn dims(&self) -> Vec { + let n = self.num_tasks(); + vec![n; n] + } + + fn evaluate(&self, config: &[usize]) -> Or { + let n = self.num_tasks(); + let Some(schedule) = Self::decode_permutation(config, n) else { + return Or(false); + }; + Or(self.all_deadlines_met(&schedule)) + } +} + +crate::declare_variants! { + default SequencingWithDeadlinesAndSetUpTimes => "factorial(num_tasks)", +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_model_example_specs() -> Vec { + vec![crate::example_db::specs::ModelExampleSpec { + id: "sequencing_with_deadlines_and_set_up_times", + // 5 tasks, lengths [2,3,1,2,2], deadlines [4,11,3,16,7], compilers [0,1,0,1,0], + // setup_times [1,2]. + // Optimal config: [2,0,4,1,3] (tasks t3,t1,t5,t2,t4 in 1-indexed) + // Position 0: task 2 (compiler 0), no prev → elapsed = 0+1 = 1 ≤ d[2]=3 ✓ + // Position 1: task 0 (compiler 0), same → elapsed = 1+2 = 3 ≤ d[0]=4 ✓ + // Position 2: task 4 (compiler 0), same → elapsed = 3+2 = 5 ≤ d[4]=7 ✓ + // Position 3: task 1 (compiler 1), switch+s[1]=2 → elapsed = 5+2+3 = 10 ≤ d[1]=11 ✓ + // Position 4: task 3 (compiler 1), same → elapsed = 10+2 = 12 ≤ d[3]=16 ✓ + instance: Box::new(SequencingWithDeadlinesAndSetUpTimes::new( + vec![2, 3, 1, 2, 2], + vec![4, 11, 3, 16, 7], + vec![0, 1, 0, 1, 0], + vec![1, 2], + )), + optimal_config: vec![2, 0, 4, 1, 3], + optimal_value: serde_json::json!(true), + }] +} + +#[cfg(test)] +#[path = "../../unit_tests/models/misc/sequencing_with_deadlines_and_set_up_times.rs"] +mod tests; diff --git a/src/rules/mod.rs b/src/rules/mod.rs index 1933cd14..3a8cf343 100644 --- a/src/rules/mod.rs +++ b/src/rules/mod.rs @@ -220,6 +220,8 @@ pub(crate) mod sequencingtominimizeweightedcompletiontime_ilp; #[cfg(feature = "ilp-solver")] pub(crate) mod sequencingtominimizeweightedtardiness_ilp; #[cfg(feature = "ilp-solver")] +pub(crate) mod sequencingwithdeadlinesandsetuptimes_ilp; +#[cfg(feature = "ilp-solver")] pub(crate) mod sequencingwithinintervals_ilp; #[cfg(feature = "ilp-solver")] pub(crate) mod sequencingwithreleasetimesanddeadlines_ilp; @@ -384,6 +386,7 @@ pub(crate) fn canonical_rule_example_specs() -> Vec. +//! +//! Position-assignment ILP with compiler-switch detection. +//! +//! Variables: +//! - `x_{j,p}` binary: task j occupies position p (n*n variables) +//! - `sw_p` binary: a compiler switch occurs before position p (n-1 variables, p >= 1) +//! - `a_{j,p}` binary: x_{j,p} = 1 AND sw_p = 1 (n*(n-1) variables, p >= 1) +//! +//! The completion time of task j at position p equals the sum of all task +//! lengths up to and including position p, plus the setup times for switches +//! at each position 1..=p. Using the `a_{j,p}` linearisation, the setup +//! contribution at position p is `sum_j s[k(j)] * a_{j,p}`. +//! +//! Deadline enforcement uses the standard big-M trick: for each (j, p), +//! if `x_{j,p}=1` then the completion time at p must not exceed `d[j]`. + +use crate::models::algebraic::{LinearConstraint, ObjectiveSense, ILP}; +use crate::models::misc::SequencingWithDeadlinesAndSetUpTimes; +use crate::reduction; +use crate::rules::ilp_helpers::one_hot_decode; +use crate::rules::traits::{ReduceTo, ReductionResult}; + +/// Result of reducing SequencingWithDeadlinesAndSetUpTimes to ILP. +#[derive(Debug, Clone)] +pub struct ReductionSWDSTToILP { + target: ILP, + num_tasks: usize, +} + +impl ReductionResult for ReductionSWDSTToILP { + type Source = SequencingWithDeadlinesAndSetUpTimes; + type Target = ILP; + + fn target_problem(&self) -> &ILP { + &self.target + } + + fn extract_solution(&self, target_solution: &[usize]) -> Vec { + let n = self.num_tasks; + // x_{j,p} occupies the first n*n variables: decode the permutation. + one_hot_decode(target_solution, n, n, 0) + } +} + +#[reduction(overhead = { + num_vars = "num_tasks * num_tasks + (num_tasks - 1) + num_tasks * (num_tasks - 1)", + num_constraints = "2 * num_tasks + 3 * (num_tasks - 1) + 3 * num_tasks * (num_tasks - 1) + num_tasks * num_tasks", +})] +impl ReduceTo> for SequencingWithDeadlinesAndSetUpTimes { + type Result = ReductionSWDSTToILP; + + fn reduce_to(&self) -> Self::Result { + let n = self.num_tasks(); + + // Handle empty case. + if n == 0 { + return ReductionSWDSTToILP { + target: ILP::new(0, vec![], vec![], ObjectiveSense::Minimize), + num_tasks: 0, + }; + } + + // Variable layout: + // x_{j,p} = j*n + p for j,p in 0..n → indices 0..n*n + // sw_p = n*n + (p-1) for p in 1..n → indices n*n .. n*n+(n-1) + // a_{j,p} = n*n+(n-1)+j*(n-1)+(p-1) for j in 0..n, p in 1..n + // → indices n*n+(n-1) .. n*n+(n-1)+n*(n-1) + let num_x = n * n; + let sw_offset = num_x; + let a_offset = sw_offset + (n - 1); + let num_vars = a_offset + n * (n - 1); + + let x_var = |j: usize, p: usize| -> usize { j * n + p }; + let sw_var = |p: usize| -> usize { sw_offset + (p - 1) }; // p >= 1 + let a_var = |j: usize, p: usize| -> usize { a_offset + j * (n - 1) + (p - 1) }; // p >= 1 + + let lengths = self.lengths(); + let deadlines = self.deadlines(); + let compilers = self.compilers(); + let setup_times = self.setup_times(); + + // Big-M: total processing time + worst-case total setup overhead. + let total_length: u64 = lengths.iter().copied().sum(); + let max_setup: u64 = setup_times.iter().copied().max().unwrap_or(0); + let big_m = total_length as f64 + max_setup as f64 * (n as f64 - 1.0); + + let mut constraints = Vec::new(); + + // 1. Each task assigned to exactly one position: sum_p x_{j,p} = 1 for all j. + for j in 0..n { + let terms: Vec<(usize, f64)> = (0..n).map(|p| (x_var(j, p), 1.0)).collect(); + constraints.push(LinearConstraint::eq(terms, 1.0)); + } + + // 2. Each position has exactly one task: sum_j x_{j,p} = 1 for all p. + for p in 0..n { + let terms: Vec<(usize, f64)> = (0..n).map(|j| (x_var(j, p), 1.0)).collect(); + constraints.push(LinearConstraint::eq(terms, 1.0)); + } + + // For each position p >= 1: + for p in 1..n { + // 3. Switch detection: sw_p >= x_{j,p} + x_{j',p-1} - 1 + // whenever k(j) != k(j'). + // This forces sw_p = 1 whenever the tasks at p-1 and p differ. + for j in 0..n { + for j_prev in 0..n { + if compilers[j] != compilers[j_prev] { + // sw_p - x_{j,p} - x_{j',p-1} >= -1 + // i.e., x_{j,p} + x_{j',p-1} - sw_p <= 1 + constraints.push(LinearConstraint::le( + vec![ + (x_var(j, p), 1.0), + (x_var(j_prev, p - 1), 1.0), + (sw_var(p), -1.0), + ], + 1.0, + )); + } + } + } + + // 4. Linearisation of a_{j,p} = x_{j,p} * sw_p for each j: + // a_{j,p} <= x_{j,p} + // a_{j,p} <= sw_p + // a_{j,p} >= x_{j,p} + sw_p - 1 + for j in 0..n { + // a_{j,p} <= x_{j,p} + constraints.push(LinearConstraint::le( + vec![(a_var(j, p), 1.0), (x_var(j, p), -1.0)], + 0.0, + )); + // a_{j,p} <= sw_p + constraints.push(LinearConstraint::le( + vec![(a_var(j, p), 1.0), (sw_var(p), -1.0)], + 0.0, + )); + // a_{j,p} >= x_{j,p} + sw_p - 1 + // i.e. x_{j,p} + sw_p - a_{j,p} <= 1 + constraints.push(LinearConstraint::le( + vec![(x_var(j, p), 1.0), (sw_var(p), 1.0), (a_var(j, p), -1.0)], + 1.0, + )); + } + } + + // 5. Deadline constraints: for each (j, p), if x_{j,p}=1, then + // the completion time at position p must be <= d[j]. + // + // Completion time at position p = + // sum_{p'<=p} sum_{j''} l_{j''} * x_{j'',p'} + // + sum_{p'=1..=p} sum_{j''} s[k(j'')] * a_{j'',p'} + // + // Big-M form (only active when x_{j,p}=1): + // M * x_{j,p} + // + sum_{p' = Vec::new(); + // Big-M activation term + terms.push((x_var(j, p), big_m)); + // Processing time for positions 0..p (not including p itself) + for pp in 0..p { + for (jj, &len) in lengths.iter().enumerate() { + terms.push((x_var(jj, pp), len as f64)); + } + } + // Setup time for positions 1..=p + for pp in 1..=p { + for jj in 0..n { + let s = setup_times[compilers[jj]] as f64; + if s > 0.0 { + terms.push((a_var(jj, pp), s)); + } + } + } + let rhs = deadlines[j] as f64 - lengths[j] as f64 + big_m; + constraints.push(LinearConstraint::le(terms, rhs)); + } + } + + ReductionSWDSTToILP { + target: ILP::new(num_vars, constraints, vec![], ObjectiveSense::Minimize), + num_tasks: n, + } + } +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_rule_example_specs() -> Vec { + vec![crate::example_db::specs::RuleExampleSpec { + id: "sequencingwithdeadlinesandsetuptimes_to_ilp", + build: || { + let source = SequencingWithDeadlinesAndSetUpTimes::new( + vec![2, 3, 1, 2, 2], + vec![4, 11, 3, 16, 7], + vec![0, 1, 0, 1, 0], + vec![1, 2], + ); + crate::example_db::specs::rule_example_via_ilp::<_, bool>(source) + }, + }] +} + +#[cfg(test)] +#[path = "../unit_tests/rules/sequencingwithdeadlinesandsetuptimes_ilp.rs"] +mod tests; diff --git a/src/unit_tests/models/misc/sequencing_with_deadlines_and_set_up_times.rs b/src/unit_tests/models/misc/sequencing_with_deadlines_and_set_up_times.rs new file mode 100644 index 00000000..eac5475d --- /dev/null +++ b/src/unit_tests/models/misc/sequencing_with_deadlines_and_set_up_times.rs @@ -0,0 +1,206 @@ +use super::*; +use crate::solvers::BruteForce; +use crate::traits::Problem; +use crate::types::Or; + +#[test] +fn test_sequencing_with_deadlines_and_set_up_times_creation() { + let problem = SequencingWithDeadlinesAndSetUpTimes::new( + vec![2, 3, 1, 2, 2], + vec![4, 11, 3, 16, 7], + vec![0, 1, 0, 1, 0], + vec![1, 2], + ); + + assert_eq!(problem.num_tasks(), 5); + assert_eq!(problem.num_compilers(), 2); + assert_eq!(problem.lengths(), &[2, 3, 1, 2, 2]); + assert_eq!(problem.deadlines(), &[4, 11, 3, 16, 7]); + assert_eq!(problem.compilers(), &[0, 1, 0, 1, 0]); + assert_eq!(problem.setup_times(), &[1, 2]); + assert_eq!(problem.dims(), vec![5, 5, 5, 5, 5]); + assert_eq!( + ::NAME, + "SequencingWithDeadlinesAndSetUpTimes" + ); + assert_eq!( + ::variant(), + vec![] + ); +} + +#[test] +fn test_sequencing_with_deadlines_and_set_up_times_evaluate_feasible() { + let problem = SequencingWithDeadlinesAndSetUpTimes::new( + vec![2, 3, 1, 2, 2], + vec![4, 11, 3, 16, 7], + vec![0, 1, 0, 1, 0], + vec![1, 2], + ); + + // Config [2,0,4,1,3]: tasks t2,t0,t4,t1,t3 (0-indexed) + // Position 0: task 2 (compiler 0), no prev → elapsed = 0+1 = 1 ≤ 3 ✓ + // Position 1: task 0 (compiler 0), same → elapsed = 1+2 = 3 ≤ 4 ✓ + // Position 2: task 4 (compiler 0), same → elapsed = 3+2 = 5 ≤ 7 ✓ + // Position 3: task 1 (compiler 1), switch s[1]=2 → elapsed = 5+2+3 = 10 ≤ 11 ✓ + // Position 4: task 3 (compiler 1), same → elapsed = 10+2 = 12 ≤ 16 ✓ + assert_eq!(problem.evaluate(&[2, 0, 4, 1, 3]), Or(true)); +} + +#[test] +fn test_sequencing_with_deadlines_and_set_up_times_evaluate_infeasible() { + let problem = SequencingWithDeadlinesAndSetUpTimes::new( + vec![2, 3, 1, 2, 2], + vec![4, 11, 3, 16, 7], + vec![0, 1, 0, 1, 0], + vec![1, 2], + ); + + // Config [0,1,2,3,4]: tasks in natural order + // Position 0: task 0 (compiler 0), no prev → elapsed = 0+2 = 2 ≤ 4 ✓ + // Position 1: task 1 (compiler 1), switch s[1]=2 → elapsed = 2+2+3 = 7 ≤ 11 ✓ + // Position 2: task 2 (compiler 0), switch s[0]=1 → elapsed = 7+1+1 = 9 > 3 ✗ + assert_eq!(problem.evaluate(&[0, 1, 2, 3, 4]), Or(false)); +} + +#[test] +fn test_sequencing_with_deadlines_and_set_up_times_evaluate_invalid_permutation() { + let problem = SequencingWithDeadlinesAndSetUpTimes::new( + vec![2, 3, 1], + vec![5, 10, 5], + vec![0, 1, 0], + vec![1, 2], + ); + + // Wrong length + assert_eq!(problem.evaluate(&[0, 1]), Or(false)); + assert_eq!(problem.evaluate(&[0, 1, 2, 0]), Or(false)); + // Duplicate + assert_eq!(problem.evaluate(&[0, 0, 1]), Or(false)); + // Out of range + assert_eq!(problem.evaluate(&[0, 1, 3]), Or(false)); +} + +#[test] +fn test_sequencing_with_deadlines_and_set_up_times_brute_force_small() { + // 3 tasks, no setup time needed if same compiler, easy instance. + let problem = SequencingWithDeadlinesAndSetUpTimes::new( + vec![1, 1, 1], + vec![3, 3, 3], + vec![0, 0, 0], + vec![0], + ); + let solver = BruteForce::new(); + let solution = solver + .find_witness(&problem) + .expect("should find a feasible schedule"); + assert_eq!(problem.evaluate(&solution), Or(true)); +} + +#[test] +fn test_sequencing_with_deadlines_and_set_up_times_brute_force_infeasible() { + // All deadlines are 1, but each task takes 2 — impossible. + let problem = SequencingWithDeadlinesAndSetUpTimes::new( + vec![2, 2, 2], + vec![1, 1, 1], + vec![0, 0, 0], + vec![0], + ); + let solver = BruteForce::new(); + assert!( + solver.find_witness(&problem).is_none(), + "infeasible instance should return None" + ); +} + +#[test] +fn test_sequencing_with_deadlines_and_set_up_times_paper_example() { + let problem = SequencingWithDeadlinesAndSetUpTimes::new( + vec![2, 3, 1, 2, 2], + vec![4, 11, 3, 16, 7], + vec![0, 1, 0, 1, 0], + vec![1, 2], + ); + let expected_config = vec![2, 0, 4, 1, 3]; + assert_eq!(problem.evaluate(&expected_config), Or(true)); + + let solver = BruteForce::new(); + let solution = solver + .find_witness(&problem) + .expect("paper example should be feasible"); + assert_eq!(problem.evaluate(&solution), Or(true)); +} + +#[test] +fn test_sequencing_with_deadlines_and_set_up_times_serialization() { + let problem = SequencingWithDeadlinesAndSetUpTimes::new( + vec![2, 3, 1], + vec![5, 10, 4], + vec![0, 1, 0], + vec![1, 2], + ); + let json = serde_json::to_value(&problem).unwrap(); + let restored: SequencingWithDeadlinesAndSetUpTimes = serde_json::from_value(json).unwrap(); + + assert_eq!(restored.lengths(), problem.lengths()); + assert_eq!(restored.deadlines(), problem.deadlines()); + assert_eq!(restored.compilers(), problem.compilers()); + assert_eq!(restored.setup_times(), problem.setup_times()); +} + +#[test] +fn test_sequencing_with_deadlines_and_set_up_times_deserialization_rejects_zero_length() { + let err = serde_json::from_value::(serde_json::json!({ + "lengths": [0, 1, 2], + "deadlines": [5, 5, 5], + "compilers": [0, 0, 0], + "setup_times": [1], + })) + .unwrap_err(); + assert!(err.to_string().contains("task lengths must be positive")); +} + +#[test] +fn test_sequencing_with_deadlines_and_set_up_times_deserialization_rejects_out_of_range_compiler() { + let err = serde_json::from_value::(serde_json::json!({ + "lengths": [1, 2], + "deadlines": [5, 5], + "compilers": [0, 2], + "setup_times": [1, 2], + })) + .unwrap_err(); + assert!(err.to_string().contains("out of range")); +} + +#[test] +fn test_sequencing_with_deadlines_and_set_up_times_setup_time_charged_on_switch() { + // Two tasks, different compilers: setup time s[compiler_of_task1] is charged + // before task 1 because task 0 uses a different compiler. + // lengths [1,1], deadlines [1, 4], compilers [0,1], setup_times [0, 2] + // Schedule [0,1]: elapsed after t0 = 1 ≤ 1 ✓; switch s[1]=2; elapsed = 1+2+1 = 4 ≤ 4 ✓ + let problem = + SequencingWithDeadlinesAndSetUpTimes::new(vec![1, 1], vec![1, 4], vec![0, 1], vec![0, 2]); + assert_eq!(problem.evaluate(&[0, 1]), Or(true)); + // Tight deadline: if setup charged, 1+2+1=4 > 3 ✗ + let tight = + SequencingWithDeadlinesAndSetUpTimes::new(vec![1, 1], vec![1, 3], vec![0, 1], vec![0, 2]); + assert_eq!(tight.evaluate(&[0, 1]), Or(false)); +} + +#[test] +#[should_panic(expected = "lengths length must equal deadlines length")] +fn test_sequencing_with_deadlines_and_set_up_times_mismatched_lengths_deadlines() { + SequencingWithDeadlinesAndSetUpTimes::new(vec![1, 2], vec![5], vec![0, 0], vec![1]); +} + +#[test] +#[should_panic(expected = "lengths length must equal compilers length")] +fn test_sequencing_with_deadlines_and_set_up_times_mismatched_lengths_compilers() { + SequencingWithDeadlinesAndSetUpTimes::new(vec![1, 2], vec![5, 5], vec![0], vec![1]); +} + +#[test] +#[should_panic(expected = "task lengths must be positive")] +fn test_sequencing_with_deadlines_and_set_up_times_zero_length() { + SequencingWithDeadlinesAndSetUpTimes::new(vec![0, 1], vec![5, 5], vec![0, 0], vec![1]); +} diff --git a/src/unit_tests/rules/sequencingwithdeadlinesandsetuptimes_ilp.rs b/src/unit_tests/rules/sequencingwithdeadlinesandsetuptimes_ilp.rs new file mode 100644 index 00000000..23f97a1a --- /dev/null +++ b/src/unit_tests/rules/sequencingwithdeadlinesandsetuptimes_ilp.rs @@ -0,0 +1,121 @@ +use super::*; +use crate::models::algebraic::ILP; +use crate::rules::test_helpers::assert_satisfaction_round_trip_from_optimization_target; +use crate::solvers::{BruteForce, ILPSolver}; +use crate::traits::Problem; +use crate::types::Or; + +#[test] +fn test_sequencingwithdeadlinesandsetuptimes_to_ilp_closed_loop() { + // Small feasible instance (3 tasks) + let problem = SequencingWithDeadlinesAndSetUpTimes::new( + vec![1, 1, 1], + vec![1, 3, 5], + vec![0, 1, 0], + vec![0, 1], + ); + let reduction = ReduceTo::>::reduce_to(&problem); + + assert_satisfaction_round_trip_from_optimization_target( + &problem, + &reduction, + "SequencingWithDeadlinesAndSetUpTimes->ILP closed loop", + ); +} + +#[test] +fn test_sequencingwithdeadlinesandsetuptimes_to_ilp_feasible_paper_example() { + let problem = SequencingWithDeadlinesAndSetUpTimes::new( + vec![2, 3, 1, 2, 2], + vec![4, 11, 3, 16, 7], + vec![0, 1, 0, 1, 0], + vec![1, 2], + ); + + let bf = BruteForce::new(); + let bf_witness = bf + .find_witness(&problem) + .expect("paper example should be feasible"); + assert_eq!(problem.evaluate(&bf_witness), Or(true)); + + let reduction = ReduceTo::>::reduce_to(&problem); + let ilp_solution = ILPSolver::new() + .solve(reduction.target_problem()) + .expect("ILP should be feasible"); + let extracted = reduction.extract_solution(&ilp_solution); + assert_eq!(problem.evaluate(&extracted), Or(true)); +} + +#[test] +fn test_sequencingwithdeadlinesandsetuptimes_to_ilp_infeasible() { + // All tasks have deadline 1 but each takes 2 — clearly impossible. + let problem = + SequencingWithDeadlinesAndSetUpTimes::new(vec![2, 2], vec![1, 1], vec![0, 0], vec![0]); + let reduction = ReduceTo::>::reduce_to(&problem); + assert!( + ILPSolver::new().solve(reduction.target_problem()).is_none(), + "infeasible instance should produce infeasible ILP" + ); +} + +#[test] +fn test_sequencingwithdeadlinesandsetuptimes_to_ilp_setup_time_respected() { + // Two tasks with different compilers: setup time s=2 must be charged. + // lengths [1,1], deadlines [1, 4], compilers [0,1], setup_times [0, 2] + // Order [0,1]: elapsed=1≤1 ✓, then switch s=2, elapsed=1+2+1=4≤4 ✓ → feasible + // Order [1,0]: elapsed=1≤4 ✓, then switch s=0, elapsed=1+0+1=2≤1 ✗ → infeasible + let problem = + SequencingWithDeadlinesAndSetUpTimes::new(vec![1, 1], vec![1, 4], vec![0, 1], vec![0, 2]); + let reduction = ReduceTo::>::reduce_to(&problem); + let ilp_solution = ILPSolver::new() + .solve(reduction.target_problem()) + .expect("ILP should be feasible"); + let extracted = reduction.extract_solution(&ilp_solution); + assert_eq!(problem.evaluate(&extracted), Or(true)); +} + +#[test] +fn test_sequencingwithdeadlinesandsetuptimes_to_ilp_bf_vs_ilp_small() { + // 3 tasks: verify brute force and ILP agree on feasibility. + let problem = SequencingWithDeadlinesAndSetUpTimes::new( + vec![2, 1, 3], + vec![3, 5, 9], + vec![0, 1, 0], + vec![1, 2], + ); + + let bf = BruteForce::new(); + let bf_result = bf.find_witness(&problem); + let bf_feasible = bf_result.is_some(); + + let reduction = ReduceTo::>::reduce_to(&problem); + let ilp_result = ILPSolver::new().solve(reduction.target_problem()); + let ilp_feasible = ilp_result.is_some(); + + assert_eq!( + bf_feasible, ilp_feasible, + "BF and ILP should agree on feasibility" + ); + if let Some(ilp_solution) = ilp_result { + let extracted = reduction.extract_solution(&ilp_solution); + assert_eq!(problem.evaluate(&extracted), Or(true)); + } +} + +#[test] +fn test_sequencingwithdeadlinesandsetuptimes_to_ilp_no_setup_same_compiler() { + // All tasks use the same compiler: no setup time ever charged. + // Tight deadlines that are only feasible without setup. + let problem = SequencingWithDeadlinesAndSetUpTimes::new( + vec![1, 2, 1], + vec![1, 3, 4], + vec![0, 0, 0], + vec![100], // large setup time, but never triggered + ); + let reduction = ReduceTo::>::reduce_to(&problem); + let ilp_solution = ILPSolver::new() + .solve(reduction.target_problem()) + .expect("should be feasible with no switches"); + let extracted = reduction.extract_solution(&ilp_solution); + assert_eq!(problem.evaluate(&extracted), Or(true)); +} From a8c17999de0b58769ad6d74f431c40e7d9343300 Mon Sep 17 00:00:00 2001 From: Xiwei Pan Date: Mon, 30 Mar 2026 06:46:30 +0800 Subject: [PATCH 05/21] feat: add PreemptiveScheduling model (#504) Implement multiprocessor preemptive scheduling (GJ SS12) with binary time-slot assignment, direct ILP reduction, CLI support, and paper entry. Co-Authored-By: Claude Opus 4.6 (1M context) --- docs/paper/reductions.typ | 52 +++ problemreductions-cli/src/commands/create.rs | 63 +++- src/models/misc/mod.rs | 4 + src/models/misc/preemptive_scheduling.rs | 295 ++++++++++++++++++ src/rules/mod.rs | 3 + src/rules/preemptivescheduling_ilp.rs | 156 +++++++++ .../models/misc/preemptive_scheduling.rs | 231 ++++++++++++++ .../rules/preemptivescheduling_ilp.rs | 105 +++++++ 8 files changed, 908 insertions(+), 1 deletion(-) create mode 100644 src/models/misc/preemptive_scheduling.rs create mode 100644 src/rules/preemptivescheduling_ilp.rs create mode 100644 src/unit_tests/models/misc/preemptive_scheduling.rs create mode 100644 src/unit_tests/rules/preemptivescheduling_ilp.rs diff --git a/docs/paper/reductions.typ b/docs/paper/reductions.typ index 07d04735..f923f3ed 100644 --- a/docs/paper/reductions.typ +++ b/docs/paper/reductions.typ @@ -177,6 +177,7 @@ "PartitionIntoPathsOfLength2": [Partition into Paths of Length 2], "PartitionIntoTriangles": [Partition Into Triangles], "PrecedenceConstrainedScheduling": [Precedence Constrained Scheduling], + "PreemptiveScheduling": [Preemptive Scheduling], "PrimeAttributeName": [Prime Attribute Name], "QuadraticAssignment": [Quadratic Assignment], "QuadraticDiophantineEquations": [Quadratic Diophantine Equations], @@ -5935,6 +5936,39 @@ A classical NP-complete problem from Garey and Johnson @garey1979[Ch.~3, p.~76], ] ] } +#{ + let x = load-model-example("PreemptiveScheduling") + let n = x.instance.lengths.len() + let m = x.instance.num_processors + let lengths = x.instance.lengths + let precs = x.instance.precedences + let d_max = lengths.fold(0, (acc, l) => acc + l) + let cfg = x.optimal_config + // For each task t, collect active time slots from the flat binary config + let active-slots = range(n).map(t => + range(d_max).filter(u => cfg.at(t * d_max + u) == 1) + ) + let makespan = x.optimal_value + [ + #problem-def("PreemptiveScheduling")[ + Given a set $T$ of $n$ tasks with processing lengths $ell: T -> ZZ^+$, a number $m in ZZ^+$ of identical processors, and a set of precedence constraints $prec$ on $T$, find a preemptive schedule that minimizes the makespan. + + A preemptive schedule assigns each task $t$ a (possibly non-contiguous) set $S(t) subset.eq {0, 1, dots, D_"max" - 1}$ of unit time slots, where $D_"max" = sum_t ell(t)$, such that $|S(t)| = ell(t)$ for all $t$, at most $m$ tasks are active at each slot, and for every precedence $(t_i prec t_j)$, the last slot of $t_i$ precedes the first slot of $t_j$. + + The makespan is $max_{t in T} (max S(t) + 1)$. + ][ + Preemptive Scheduling is problem A5 SS6 in Garey & Johnson @garey1979. NP-complete in general; the special case without precedences ($m$ arbitrary) is solvable in polynomial time (McNaughton's wrap-around algorithm), and the preemptive open-shop variant is also polynomial. The configuration representation is a binary vector of length $n dot D_"max"$ encoding per-slot assignments. + + *Example.* Let $n = #n$ tasks with lengths $(#lengths.map(str).join(", "))$, $m = #m$ processors, and precedences #{precs.map(p => $t_#(p.at(0)) prec t_#(p.at(1))$).join(", ")}. Optimal makespan: $#makespan$. Schedule: #range(n).map(t => [$t_#t$ at slots $[#active-slots.at(t).map(str).join(", ")]$]).join("; "). + + #pred-commands( + "pred create --example PreemptiveScheduling -o preemptive-scheduling.json", + "pred solve preemptive-scheduling.json", + "pred evaluate preemptive-scheduling.json --config " + cfg.map(str).join(","), + ) + ] + ] +} #{ let x = load-model-example("SchedulingToMinimizeWeightedCompletionTime") let ntasks = x.instance.lengths.len() @@ -9158,6 +9192,24 @@ The following reductions to Integer Linear Programming are straightforward formu _Solution extraction._ Task $j$ is scheduled at time $arg max_t x_(j,t)$. ] +#reduction-rule("PreemptiveScheduling", "ILP")[ + Minimize makespan for preemptive parallel scheduling with variable-length tasks and precedence constraints. +][ + _Construction._ Let $D = sum_t ell(t)$ be the horizon. Variables: binary $x_(t,u) in {0,1}$ (task $t$ processed at slot $u$) for $t in {0, dots, n-1}$, $u in {0, dots, D-1}$; integer $M in {0, dots, D}$ (makespan). The ILP is: + $ + min quad & M \ + "subject to" quad & sum_u x_(t,u) = ell(t) quad forall t quad "(work)" \ + & sum_t x_(t,u) <= m quad forall u quad "(capacity)" \ + & sum_u u dot x_(j,u) - sum_u u dot x_(i,u) >= 1 quad "for each" (i prec j) quad "(precedence)" \ + & M - (u+1) dot x_(t,u) >= 0 quad forall t, u quad "(makespan)" \ + & x_(t,u) in {0, 1}, quad M in ZZ_(>= 0) + $. + + _Correctness._ Work constraints enforce each task runs for exactly $ell(t)$ slots. Capacity limits at most $m$ tasks per slot. Precedences are enforced by weighted time indicators. Makespan lower bounds force $M >= u+1$ whenever task $t$ is active at slot $u$. + + _Solution extraction._ Config$[t dot D + u] = x_(t,u)$ for all $t, u$. +] + #reduction-rule("SequencingWithinIntervals", "ILP")[ Schedule tasks with release times, deadlines, and processing lengths on a single machine without overlap. ][ diff --git a/problemreductions-cli/src/commands/create.rs b/problemreductions-cli/src/commands/create.rs index 2e061b0d..070b12cb 100644 --- a/problemreductions-cli/src/commands/create.rs +++ b/problemreductions-cli/src/commands/create.rs @@ -27,7 +27,7 @@ use problemreductions::models::misc::{ IntegerExpressionMembership, JobShopScheduling, KnownValue, KthLargestMTuple, LongestCommonSubsequence, MinimumExternalMacroDataCompression, MinimumInternalMacroDataCompression, MinimumTardinessSequencing, MultiprocessorScheduling, - PaintShop, PartiallyOrderedKnapsack, ProductionPlanning, QueryArg, + PaintShop, PartiallyOrderedKnapsack, PreemptiveScheduling, ProductionPlanning, QueryArg, RectilinearPictureCompression, RegisterSufficiency, ResourceConstrainedScheduling, SchedulingToMinimizeWeightedCompletionTime, SchedulingWithIndividualDeadlines, SequencingToMinimizeMaximumCumulativeCost, SequencingToMinimizeTardyTaskWeight, @@ -686,6 +686,9 @@ fn example_for(canonical: &str, graph_type: Option<&str>) -> &'static str { "--num-periods 6 --demands 5,3,7,2,8,5 --capacities 12,12,12,12,12,12 --setup-costs 10,10,10,10,10,10 --production-costs 1,1,1,1,1,1 --inventory-costs 1,1,1,1,1,1 --cost-bound 80" } "MultiprocessorScheduling" => "--lengths 4,5,3,2,6 --num-processors 2 --deadline 10", + "PreemptiveScheduling" => { + "--sizes 2,1,3,2,1 --num-processors 2 --precedence-pairs \"0>2,1>3\"" + } "SchedulingToMinimizeWeightedCompletionTime" => { "--lengths 1,2,3,4,5 --weights 6,4,3,2,1 --num-processors 2" } @@ -3438,6 +3441,64 @@ pub fn create(args: &CreateArgs, out: &OutputConfig) -> Result<()> { ) } + // PreemptiveScheduling + "PreemptiveScheduling" => { + let usage = "Usage: pred create PreemptiveScheduling --sizes 2,1,3,2,1 --num-processors 2 [--precedence-pairs \"0>2,1>3\"]"; + let sizes_str = args.sizes.as_deref().ok_or_else(|| { + anyhow::anyhow!( + "PreemptiveScheduling requires --sizes and --num-processors\n\n{usage}" + ) + })?; + let num_processors = args.num_processors.ok_or_else(|| { + anyhow::anyhow!("PreemptiveScheduling requires --num-processors\n\n{usage}") + })?; + anyhow::ensure!( + num_processors > 0, + "PreemptiveScheduling requires --num-processors > 0\n\n{usage}" + ); + let lengths: Vec = util::parse_comma_list(sizes_str)?; + anyhow::ensure!( + lengths.iter().all(|&l| l > 0), + "PreemptiveScheduling: all task lengths must be positive\n\n{usage}" + ); + let num_tasks = lengths.len(); + let precedences: Vec<(usize, usize)> = match args.precedence_pairs.as_deref() { + Some(s) if !s.is_empty() => s + .split(',') + .map(|pair| { + let parts: Vec<&str> = pair.trim().split('>').collect(); + anyhow::ensure!( + parts.len() == 2, + "Invalid precedence format '{}', expected 'u>v'", + pair.trim() + ); + Ok(( + parts[0].trim().parse::()?, + parts[1].trim().parse::()?, + )) + }) + .collect::>>()?, + _ => vec![], + }; + for &(pred, succ) in &precedences { + anyhow::ensure!( + pred < num_tasks && succ < num_tasks, + "precedence index out of range: ({}, {}) but num_tasks = {}", + pred, + succ, + num_tasks + ); + } + ( + ser(PreemptiveScheduling::new( + lengths, + num_processors, + precedences, + ))?, + resolved_variant.clone(), + ) + } + // SchedulingToMinimizeWeightedCompletionTime "SchedulingToMinimizeWeightedCompletionTime" => { let usage = "Usage: pred create SchedulingToMinimizeWeightedCompletionTime --lengths 1,2,3,4,5 --weights 6,4,3,2,1 --num-processors 2"; diff --git a/src/models/misc/mod.rs b/src/models/misc/mod.rs index a34a5d51..86391564 100644 --- a/src/models/misc/mod.rs +++ b/src/models/misc/mod.rs @@ -24,6 +24,7 @@ //! - [`Partition`]: Partition a multiset into two equal-sum subsets //! - [`PartiallyOrderedKnapsack`]: Knapsack with precedence constraints //! - [`PrecedenceConstrainedScheduling`]: Schedule unit tasks on processors by deadline +//! - [`PreemptiveScheduling`]: Preemptive parallel scheduling with precedences (minimize makespan) //! - [`ProductionPlanning`]: Meet all period demands within capacity and total-cost bounds //! - [`RectilinearPictureCompression`]: Cover 1-entries with bounded rectangles //! - [`RegisterSufficiency`]: Evaluate DAG computation with bounded registers @@ -94,6 +95,7 @@ pub(crate) mod paintshop; pub(crate) mod partially_ordered_knapsack; pub(crate) mod partition; mod precedence_constrained_scheduling; +mod preemptive_scheduling; mod production_planning; mod rectilinear_picture_compression; mod register_sufficiency; @@ -144,6 +146,7 @@ pub use paintshop::PaintShop; pub use partially_ordered_knapsack::PartiallyOrderedKnapsack; pub use partition::Partition; pub use precedence_constrained_scheduling::PrecedenceConstrainedScheduling; +pub use preemptive_scheduling::PreemptiveScheduling; pub use production_planning::ProductionPlanning; pub use rectilinear_picture_compression::RectilinearPictureCompression; pub use register_sufficiency::RegisterSufficiency; @@ -216,5 +219,6 @@ pub(crate) fn canonical_model_example_specs() -> Vec", description: "Processing length l(t) for each task" }, + FieldInfo { name: "num_processors", type_name: "usize", description: "Number of identical processors m" }, + FieldInfo { name: "precedences", type_name: "Vec<(usize, usize)>", description: "Precedence pairs (pred, succ) — pred must finish before succ starts" }, + ], + } +} + +/// The Preemptive Scheduling problem. +/// +/// Given `n` tasks with processing lengths `l(0), ..., l(n-1)`, `m` identical +/// processors, and a set of precedence constraints, find a preemptive schedule +/// that minimizes the makespan. +/// +/// Tasks may be interrupted and resumed at later time slots (preemption). +/// A configuration is a binary vector of length `n × D_max` where +/// `D_max = sum of all lengths` is the worst-case makespan. +/// +/// `config[t * D_max + u] = 1` means task `t` is processed at time slot `u`. +/// +/// A valid schedule satisfies: +/// - Each task `t` is active in exactly `l(t)` time slots. +/// - At most `m` tasks are active at any time slot. +/// - For each precedence `(pred, succ)`, the last active slot of `pred` is +/// strictly less than the first active slot of `succ`. +/// +/// The makespan is `max_t (last active slot of t + 1)`. +/// +/// # Example +/// +/// ``` +/// use problemreductions::models::misc::PreemptiveScheduling; +/// use problemreductions::Problem; +/// +/// let problem = PreemptiveScheduling::new(vec![2, 1], 2, vec![]); +/// // D_max = 3, config length = 2 * 3 = 6 +/// // task 0 active at slots 0,1; task 1 active at slot 0 +/// let config = vec![1, 1, 0, 1, 0, 0]; +/// assert_eq!(problem.evaluate(&config), problemreductions::types::Min(Some(2))); +/// ``` +#[derive(Debug, Clone, Serialize)] +pub struct PreemptiveScheduling { + /// Processing length for each task. + lengths: Vec, + /// Number of identical processors. + num_processors: usize, + /// Precedence constraints: (pred, succ) means pred must finish before succ starts. + precedences: Vec<(usize, usize)>, +} + +#[derive(Deserialize)] +struct PreemptiveSchedulingSerde { + lengths: Vec, + num_processors: usize, + precedences: Vec<(usize, usize)>, +} + +impl PreemptiveScheduling { + fn validate( + lengths: &[usize], + num_processors: usize, + precedences: &[(usize, usize)], + ) -> Result<(), String> { + if lengths.contains(&0) { + return Err("task lengths must be positive".to_string()); + } + if num_processors == 0 { + return Err("num_processors must be positive".to_string()); + } + let n = lengths.len(); + for &(pred, succ) in precedences { + if pred >= n || succ >= n { + return Err(format!( + "precedence index out of range: ({pred}, {succ}) but num_tasks = {n}" + )); + } + } + Ok(()) + } + + /// Create a new Preemptive Scheduling instance. + /// + /// # Arguments + /// * `lengths` - Processing length `l(t)` for each task (must be positive) + /// * `num_processors` - Number of identical processors `m` (must be positive) + /// * `precedences` - Pairs `(pred, succ)`: task `pred` must finish before task `succ` starts + /// + /// # Panics + /// + /// Panics if any length is zero, `num_processors` is zero, or any precedence + /// index is out of range. + pub fn new( + lengths: Vec, + num_processors: usize, + precedences: Vec<(usize, usize)>, + ) -> Self { + Self::validate(&lengths, num_processors, &precedences) + .unwrap_or_else(|err| panic!("{err}")); + Self { + lengths, + num_processors, + precedences, + } + } + + /// Get the number of tasks. + pub fn num_tasks(&self) -> usize { + self.lengths.len() + } + + /// Get the number of processors. + pub fn num_processors(&self) -> usize { + self.num_processors + } + + /// Get the number of precedence constraints. + pub fn num_precedences(&self) -> usize { + self.precedences.len() + } + + /// Get the processing lengths. + pub fn lengths(&self) -> &[usize] { + &self.lengths + } + + /// Get the precedence constraints. + pub fn precedences(&self) -> &[(usize, usize)] { + &self.precedences + } + + /// Compute `D_max = sum of all task lengths` (worst-case makespan). + pub fn d_max(&self) -> usize { + self.lengths.iter().sum() + } +} + +impl TryFrom for PreemptiveScheduling { + type Error = String; + + fn try_from(value: PreemptiveSchedulingSerde) -> Result { + Self::validate(&value.lengths, value.num_processors, &value.precedences)?; + Ok(Self { + lengths: value.lengths, + num_processors: value.num_processors, + precedences: value.precedences, + }) + } +} + +impl<'de> Deserialize<'de> for PreemptiveScheduling { + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + let value = PreemptiveSchedulingSerde::deserialize(deserializer)?; + Self::try_from(value).map_err(serde::de::Error::custom) + } +} + +impl Problem for PreemptiveScheduling { + const NAME: &'static str = "PreemptiveScheduling"; + type Value = Min; + + fn variant() -> Vec<(&'static str, &'static str)> { + crate::variant_params![] + } + + fn dims(&self) -> Vec { + let d = self.d_max(); + vec![2; self.num_tasks() * d] + } + + fn evaluate(&self, config: &[usize]) -> Min { + let n = self.num_tasks(); + let d = self.d_max(); + + // Check config length + if config.len() != n * d { + return Min(None); + } + + // Check each slot is binary + if config.iter().any(|&v| v > 1) { + return Min(None); + } + + // Check each task t is active in exactly l(t) slots + for t in 0..n { + let active: usize = config[t * d..(t + 1) * d].iter().sum(); + if active != self.lengths[t] { + return Min(None); + } + } + + // Check processor capacity at each time slot + for u in 0..d { + let active_count: usize = (0..n).filter(|&t| config[t * d + u] == 1).count(); + if active_count > self.num_processors { + return Min(None); + } + } + + // Check precedence constraints: + // last active slot of pred < first active slot of succ + for &(pred, succ) in &self.precedences { + let last_pred = (0..d).rev().find(|&u| config[pred * d + u] == 1); + let first_succ = (0..d).find(|&u| config[succ * d + u] == 1); + if let (Some(lp), Some(fs)) = (last_pred, first_succ) { + if lp >= fs { + return Min(None); + } + } + } + + // Compute makespan: max over all t of (last active slot + 1) + let makespan = (0..n) + .filter_map(|t| (0..d).rev().find(|&u| config[t * d + u] == 1)) + .map(|last| last + 1) + .max() + .unwrap_or(0); + + Min(Some(makespan)) + } +} + +crate::declare_variants! { + default PreemptiveScheduling => "2^(num_tasks * num_tasks)", +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_model_example_specs() -> Vec { + // 5 tasks, lengths [2,1,3,2,1], 2 processors, precedences [(0,2),(1,3)] + // D_max = 2+1+3+2+1 = 9 + // Optimal schedule (makespan 5): + // t0: slots 0,1 → t0*9+0=1, t0*9+1=1 + // t1: slot 0 → t1*9+0=1 + // t2: slots 2,3,4 → t2*9+2=1, t2*9+3=1, t2*9+4=1 + // t3: slots 2,3 → t3*9+2=1, t3*9+3=1 + // t4: slot 1 → t4*9+1=1 + // config indices (length 45): + // t0 (0..9): [1,1,0,0,0,0,0,0,0] + // t1 (9..18): [1,0,0,0,0,0,0,0,0] + // t2 (18..27):[0,0,1,1,1,0,0,0,0] + // t3 (27..36):[0,0,1,1,0,0,0,0,0] + // t4 (36..45):[0,1,0,0,0,0,0,0,0] + let mut config = vec![0usize; 5 * 9]; + // t0 (config[0..9]) at slots 0,1 + config[0] = 1; + config[1] = 1; + // t1 (config[9..18]) at slot 0 + config[9] = 1; + // t2 (config[18..27]) at slots 2,3,4 + config[18 + 2] = 1; + config[18 + 3] = 1; + config[18 + 4] = 1; + // t3 (config[27..36]) at slots 2,3 + config[27 + 2] = 1; + config[27 + 3] = 1; + // t4 (config[36..45]) at slot 1 + config[36 + 1] = 1; + vec![crate::example_db::specs::ModelExampleSpec { + id: "preemptive_scheduling", + instance: Box::new(PreemptiveScheduling::new( + vec![2, 1, 3, 2, 1], + 2, + vec![(0, 2), (1, 3)], + )), + optimal_config: config, + optimal_value: serde_json::json!(5), + }] +} + +#[cfg(test)] +#[path = "../../unit_tests/models/misc/preemptive_scheduling.rs"] +mod tests; diff --git a/src/rules/mod.rs b/src/rules/mod.rs index 3a8cf343..90abab79 100644 --- a/src/rules/mod.rs +++ b/src/rules/mod.rs @@ -196,6 +196,8 @@ pub(crate) mod pathconstrainednetworkflow_ilp; #[cfg(feature = "ilp-solver")] pub(crate) mod precedenceconstrainedscheduling_ilp; #[cfg(feature = "ilp-solver")] +pub(crate) mod preemptivescheduling_ilp; +#[cfg(feature = "ilp-solver")] pub(crate) mod quadraticassignment_ilp; #[cfg(feature = "ilp-solver")] pub(crate) mod qubo_ilp; @@ -375,6 +377,7 @@ pub(crate) fn canonical_rule_example_specs() -> Vec. +//! +//! Time-indexed formulation with an auxiliary integer makespan variable: +//! - Variables: binary x_{t,u} for t in 0..n, u in 0..D_max (task t processed at slot u), +//! plus integer M (the makespan), indexed at position n*D_max. +//! - Variable index for x_{t,u}: t * D_max + u. +//! - Variable index for M: n * D_max. +//! - Constraints: +//! 1. Work: Σ_u x_{t,u} = l(t) for each task t +//! 2. Capacity: Σ_t x_{t,u} ≤ m for each time slot u +//! 3. Precedence: for each (pred, succ) and each slot u, +//! `l(pred) * x_{succ,u} ≤ Σ_{v=0}^{u-1} x_{pred,v}` +//! This ensures succ can only be active at slot u if pred has already +//! completed all l(pred) units of work in slots 0..u-1. +//! 4. Makespan lower bound: M ≥ (u+1) when x_{t,u}=1: +//! `M - (u+1)*x_{t,u} ≥ 0` for all t,u +//! 5. Binary bounds: x_{t,u} ≤ 1 for each t,u +//! (since ILP uses non-negative integer domain) +//! - Objective: Minimize M. +//! +//! Note: ILP treats all variables as non-negative integers. Binary constraints +//! on x_{t,u} are enforced by x_{t,u} ≤ 1. + +use crate::models::algebraic::{LinearConstraint, ObjectiveSense, ILP}; +use crate::models::misc::PreemptiveScheduling; +use crate::reduction; +use crate::rules::traits::{ReduceTo, ReductionResult}; + +/// Result of reducing PreemptiveScheduling to ILP. +/// +/// Variable layout: +/// - x_{t,u} at index t * D_max + u for t in 0..n, u in 0..D_max (n*D_max vars) +/// - M at index n * D_max (1 integer var) +/// +/// Total: n * D_max + 1 variables. +#[derive(Debug, Clone)] +pub struct ReductionPSToILP { + target: ILP, + num_tasks: usize, + d_max: usize, +} + +impl ReductionResult for ReductionPSToILP { + type Source = PreemptiveScheduling; + type Target = ILP; + + fn target_problem(&self) -> &ILP { + &self.target + } + + /// Extract schedule from ILP solution. + /// + /// Returns a binary config of length n * D_max: `config[t * D_max + u] = x_{t,u}`. + fn extract_solution(&self, target_solution: &[usize]) -> Vec { + let nd = self.num_tasks * self.d_max; + target_solution[..nd.min(target_solution.len())].to_vec() + } +} + +#[reduction( + overhead = { + num_vars = "num_tasks * num_tasks + 1", + num_constraints = "num_tasks + num_tasks * num_tasks + num_precedences * num_tasks + num_tasks * num_tasks", + } +)] +impl ReduceTo> for PreemptiveScheduling { + type Result = ReductionPSToILP; + + fn reduce_to(&self) -> Self::Result { + let n = self.num_tasks(); + let m = self.num_processors(); + let d = self.d_max(); + let num_task_vars = n * d; + let m_var = num_task_vars; // index of the makespan variable M + let num_vars = num_task_vars + 1; + + let x = |t: usize, u: usize| t * d + u; + + let mut constraints = Vec::new(); + + // 1. Work constraints: Σ_u x_{t,u} = l(t) for each task t + for t in 0..n { + let terms: Vec<(usize, f64)> = (0..d).map(|u| (x(t, u), 1.0)).collect(); + constraints.push(LinearConstraint::eq(terms, self.lengths()[t] as f64)); + } + + // 2. Capacity constraints: Σ_t x_{t,u} ≤ m for each time slot u + for u in 0..d { + let terms: Vec<(usize, f64)> = (0..n).map(|t| (x(t, u), 1.0)).collect(); + constraints.push(LinearConstraint::le(terms, m as f64)); + } + + // 3. Precedence constraints: for each (pred, succ) and each slot u: + // l(pred) * x_{succ,u} ≤ Σ_{v=0}^{u-1} x_{pred,v} + // i.e. l(pred) * x_{succ,u} - Σ_{v=0}^{u-1} x_{pred,v} ≤ 0 + // + // Interpretation: succ can only be active at slot u once pred has + // accumulated all l(pred) units of work in strictly earlier slots. + for &(pred, succ) in self.precedences() { + let l_pred = self.lengths()[pred] as f64; + for u in 0..d { + // Σ_{v=0}^{u-1} x_{pred,v} - l(pred)*x_{succ,u} ≥ 0 + // i.e. l(pred)*x_{succ,u} - Σ_{v = Vec::new(); + // Cumulative pred work up to u-1 + for v in 0..u { + terms.push((x(pred, v), -1.0)); + } + terms.push((x(succ, u), l_pred)); + constraints.push(LinearConstraint::le(terms, 0.0)); + } + } + + // 4. Makespan lower bound: M - (u+1)*x_{t,u} ≥ 0 for all t,u + for t in 0..n { + for u in 0..d { + constraints.push(LinearConstraint::ge( + vec![(m_var, 1.0), (x(t, u), -((u + 1) as f64))], + 0.0, + )); + } + } + + // 5. Binary upper bound: x_{t,u} ≤ 1 for all t,u + for t in 0..n { + for u in 0..d { + constraints.push(LinearConstraint::le(vec![(x(t, u), 1.0)], 1.0)); + } + } + + // Objective: minimize M + let objective = vec![(m_var, 1.0)]; + + ReductionPSToILP { + target: ILP::new(num_vars, constraints, objective, ObjectiveSense::Minimize), + num_tasks: n, + d_max: d, + } + } +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_rule_example_specs() -> Vec { + vec![crate::example_db::specs::RuleExampleSpec { + id: "preemptivescheduling_to_ilp", + build: || { + // 3 tasks, lengths [2,1,2], 2 processors, precedence (0,2) + let source = PreemptiveScheduling::new(vec![2, 1, 2], 2, vec![(0, 2)]); + crate::example_db::specs::rule_example_via_ilp::<_, i32>(source) + }, + }] +} + +#[cfg(test)] +#[path = "../unit_tests/rules/preemptivescheduling_ilp.rs"] +mod tests; diff --git a/src/unit_tests/models/misc/preemptive_scheduling.rs b/src/unit_tests/models/misc/preemptive_scheduling.rs new file mode 100644 index 00000000..d1e2f880 --- /dev/null +++ b/src/unit_tests/models/misc/preemptive_scheduling.rs @@ -0,0 +1,231 @@ +use super::*; +use crate::traits::Problem; +use crate::types::Min; + +// ─── helpers ─────────────────────────────────────────────────────────────── + +/// Small instance: 2 tasks with lengths [2, 1], 2 processors, no precedences. +/// D_max = 3. Config length = 2 * 3 = 6. +fn small_instance() -> PreemptiveScheduling { + PreemptiveScheduling::new(vec![2, 1], 2, vec![]) +} + +/// 2 tasks with a precedence: task 0 → task 1. +/// lengths [1, 1], 2 processors, precedence (0,1). +/// D_max = 2. Config length = 2 * 2 = 4. +fn precedence_instance() -> PreemptiveScheduling { + PreemptiveScheduling::new(vec![1, 1], 2, vec![(0, 1)]) +} + +// ─── creation / accessor tests ───────────────────────────────────────────── + +#[test] +fn test_preemptive_scheduling_creation() { + let p = PreemptiveScheduling::new(vec![2, 1, 3], 2, vec![(0, 2)]); + assert_eq!(p.num_tasks(), 3); + assert_eq!(p.num_processors(), 2); + assert_eq!(p.num_precedences(), 1); + assert_eq!(p.lengths(), &[2, 1, 3]); + assert_eq!(p.precedences(), &[(0, 2)]); + assert_eq!(p.d_max(), 6); + assert_eq!(p.dims(), vec![2; 3 * 6]); + assert_eq!( + ::NAME, + "PreemptiveScheduling" + ); + assert!(::variant().is_empty()); +} + +#[test] +fn test_preemptive_scheduling_empty_tasks() { + let p = PreemptiveScheduling::new(vec![], 1, vec![]); + assert_eq!(p.num_tasks(), 0); + assert_eq!(p.d_max(), 0); + assert_eq!(p.dims(), Vec::::new()); + assert_eq!(p.evaluate(&[]), Min(Some(0))); +} + +// ─── evaluate: valid configs ──────────────────────────────────────────────── + +#[test] +fn test_preemptive_scheduling_evaluate_valid_no_precedence() { + let p = small_instance(); + // D_max=3; t0 active at 0,1 t1 active at 0 + // config layout: [t0s0, t0s1, t0s2, t1s0, t1s1, t1s2] + let config = vec![1, 1, 0, 1, 0, 0]; + assert_eq!(p.evaluate(&config), Min(Some(2))); +} + +#[test] +fn test_preemptive_scheduling_evaluate_valid_split() { + // Single processor, 1 task of length 2; split into slots 0 and 2 + let p = PreemptiveScheduling::new(vec![2], 1, vec![]); + // D_max=2, config length=2 + let config = vec![1, 1]; + assert_eq!(p.evaluate(&config), Min(Some(2))); +} + +#[test] +fn test_preemptive_scheduling_evaluate_valid_precedence() { + // Task 0 finishes at slot 0 (last=0), task 1 starts at slot 1 (first=1). OK. + let p = precedence_instance(); + // D_max=2; t0=[1,0], t1=[0,1] + let config = vec![1, 0, 0, 1]; + assert_eq!(p.evaluate(&config), Min(Some(2))); +} + +#[test] +fn test_preemptive_scheduling_makespan_correct() { + // 3 tasks on 3 processors, no precedences, all finish at slot 2 + let p = PreemptiveScheduling::new(vec![1, 1, 1], 3, vec![]); + // D_max=3; each task active in exactly 1 slot, all at slot 2 + let config = vec![ + 0, 0, 1, // t0 at slot 2 + 0, 0, 1, // t1 at slot 2 + 0, 0, 1, // t2 at slot 2 + ]; + // 3 tasks at slot 2 <= 3 processors OK, makespan = 3 + assert_eq!(p.evaluate(&config), Min(Some(3))); +} + +// ─── evaluate: invalid configs ───────────────────────────────────────────── + +#[test] +fn test_preemptive_scheduling_evaluate_wrong_length() { + let p = small_instance(); + assert_eq!(p.evaluate(&[]), Min(None)); + assert_eq!(p.evaluate(&[1, 1, 0]), Min(None)); // too short + assert_eq!(p.evaluate(&[1, 1, 0, 1, 0, 0, 0]), Min(None)); // too long +} + +#[test] +fn test_preemptive_scheduling_evaluate_wrong_active_count() { + let p = small_instance(); + // t0 needs 2 active slots but gets 1; t1 needs 1 but gets 1 + let config = vec![1, 0, 0, 1, 0, 0]; + assert_eq!(p.evaluate(&config), Min(None)); +} + +#[test] +fn test_preemptive_scheduling_evaluate_processor_overflow() { + // 3 tasks, 2 processors; all three tasks at slot 0 + let p = PreemptiveScheduling::new(vec![1, 1, 1], 2, vec![]); + // D_max=3; all at slot 0 → 3 tasks > 2 processors + let config = vec![1, 0, 0, 1, 0, 0, 1, 0, 0]; + assert_eq!(p.evaluate(&config), Min(None)); +} + +#[test] +fn test_preemptive_scheduling_evaluate_precedence_violation() { + // Task 0 last active slot 1, task 1 first active slot 1 — not strictly less + let p = precedence_instance(); + // D_max=2; t0=[0,1], t1=[0,1] — both active at slot 1; last of pred = 1, first of succ = 0 + // Actually last_pred = 1, first_succ = 0 → 1 >= 0 → violation + let config = vec![0, 1, 1, 0]; + assert_eq!(p.evaluate(&config), Min(None)); +} + +#[test] +fn test_preemptive_scheduling_evaluate_precedence_same_slot() { + // Tasks assigned to the same slot; last_pred = 0, first_succ = 0 → violation + let p = precedence_instance(); + // t0=[1,0], t1=[1,0] + let config = vec![1, 0, 1, 0]; + assert_eq!(p.evaluate(&config), Min(None)); +} + +// ─── paper canonical example ──────────────────────────────────────────────── + +#[test] +fn test_preemptive_scheduling_paper_example() { + // 5 tasks, lengths [2,1,3,2,1], 2 processors, precedences [(0,2),(1,3)] + // Optimal makespan = 5 + let p = PreemptiveScheduling::new(vec![2, 1, 3, 2, 1], 2, vec![(0, 2), (1, 3)]); + let d = p.d_max(); // = 9 + assert_eq!(d, 9); + + let mut config = vec![0usize; 5 * d]; + // t0 (task index 0) occupies config[0..d] + config[0] = 1; // t0 at slot 0 + config[1] = 1; // t0 at slot 1 + // t1 (task index 1) occupies config[d..2*d] + config[d] = 1; // t1 at slot 0 + // t2 (task index 2) occupies config[2*d..3*d] + config[2 * d + 2] = 1; // t2 at slot 2 + config[2 * d + 3] = 1; // t2 at slot 3 + config[2 * d + 4] = 1; // t2 at slot 4 + // t3 (task index 3) occupies config[3*d..4*d] + config[3 * d + 2] = 1; // t3 at slot 2 + config[3 * d + 3] = 1; // t3 at slot 3 + // t4 (task index 4) occupies config[4*d..5*d] + config[4 * d + 1] = 1; // t4 at slot 1 + + assert_eq!(p.evaluate(&config), Min(Some(5))); +} + +// ─── serialization ────────────────────────────────────────────────────────── + +#[test] +fn test_preemptive_scheduling_serialization() { + let p = PreemptiveScheduling::new(vec![2, 1, 3], 2, vec![(0, 2)]); + let json = serde_json::to_value(&p).unwrap(); + let restored: PreemptiveScheduling = serde_json::from_value(json).unwrap(); + assert_eq!(restored.num_tasks(), p.num_tasks()); + assert_eq!(restored.num_processors(), p.num_processors()); + assert_eq!(restored.lengths(), p.lengths()); + assert_eq!(restored.precedences(), p.precedences()); +} + +#[test] +fn test_preemptive_scheduling_serialization_roundtrip_evaluate() { + let p = PreemptiveScheduling::new(vec![1, 1], 2, vec![(0, 1)]); + let json = serde_json::to_value(&p).unwrap(); + let p2: PreemptiveScheduling = serde_json::from_value(json).unwrap(); + // valid: t0 at 0, t1 at 1 + let config = vec![1, 0, 0, 1]; + assert_eq!(p.evaluate(&config), p2.evaluate(&config)); +} + +// ─── validation panics ────────────────────────────────────────────────────── + +#[test] +#[should_panic(expected = "task lengths must be positive")] +fn test_preemptive_scheduling_zero_length() { + PreemptiveScheduling::new(vec![0, 1], 2, vec![]); +} + +#[test] +#[should_panic(expected = "num_processors must be positive")] +fn test_preemptive_scheduling_zero_processors() { + PreemptiveScheduling::new(vec![1, 1], 0, vec![]); +} + +#[test] +#[should_panic(expected = "precedence index out of range")] +fn test_preemptive_scheduling_precedence_out_of_range() { + PreemptiveScheduling::new(vec![1, 1], 2, vec![(0, 5)]); +} + +// ─── serde validation ─────────────────────────────────────────────────────── + +#[test] +fn test_preemptive_scheduling_deserialize_invalid_zero_length() { + let json = serde_json::json!({ + "lengths": [0, 1], + "num_processors": 2, + "precedences": [] + }); + let result: Result = serde_json::from_value(json); + assert!(result.is_err()); +} + +#[test] +fn test_preemptive_scheduling_deserialize_invalid_zero_processors() { + let json = serde_json::json!({ + "lengths": [1, 2], + "num_processors": 0, + "precedences": [] + }); + let result: Result = serde_json::from_value(json); + assert!(result.is_err()); +} diff --git a/src/unit_tests/rules/preemptivescheduling_ilp.rs b/src/unit_tests/rules/preemptivescheduling_ilp.rs new file mode 100644 index 00000000..95a0258a --- /dev/null +++ b/src/unit_tests/rules/preemptivescheduling_ilp.rs @@ -0,0 +1,105 @@ +use super::*; +use crate::models::algebraic::ILP; +use crate::solvers::ILPSolver; +use crate::traits::Problem; +use crate::types::Min; + +// ─── helpers ─────────────────────────────────────────────────────────────── + +/// 2 tasks, lengths [1, 1], 2 processors, precedence (0,1). +/// D_max = 2. Optimal makespan = 2. +fn small_instance() -> PreemptiveScheduling { + PreemptiveScheduling::new(vec![1, 1], 2, vec![(0, 1)]) +} + +/// 3 tasks, lengths [2,1,2], 2 processors, precedence (0,2). +/// D_max = 5. Feasible with makespan ≤ 5. +fn medium_instance() -> PreemptiveScheduling { + PreemptiveScheduling::new(vec![2, 1, 2], 2, vec![(0, 2)]) +} + +// ─── structure ───────────────────────────────────────────────────────────── + +#[test] +fn test_preemptivescheduling_to_ilp_structure() { + let p = small_instance(); + // n=2, D_max=2 → 2*2+1 = 5 variables + let reduction: ReductionPSToILP = ReduceTo::>::reduce_to(&p); + let ilp = reduction.target_problem(); + assert_eq!(ilp.num_vars, 5, "expected n*D_max+1 = 5 variables"); + assert_eq!( + ilp.objective, + vec![(4, 1.0)], + "objective: minimize M at index 4" + ); + + // Constraints: + // 2 work + 2 capacity + 1 prec*(D_max=2 slots) + 2*2 makespan + 2*2 binary = 2+2+2+4+4 = 14 + assert_eq!(ilp.constraints.len(), 14); +} + +// ─── closed-loop ─────────────────────────────────────────────────────────── + +#[test] +fn test_preemptivescheduling_to_ilp_closed_loop() { + let p = small_instance(); + let reduction: ReductionPSToILP = ReduceTo::>::reduce_to(&p); + let ilp_solution = ILPSolver::new() + .solve(reduction.target_problem()) + .expect("ILP should be feasible"); + let extracted = reduction.extract_solution(&ilp_solution); + let value = p.evaluate(&extracted); + assert!( + value.0.is_some(), + "extracted schedule should be valid, got {value:?}" + ); +} + +#[test] +fn test_preemptivescheduling_to_ilp_medium_closed_loop() { + let p = medium_instance(); + let reduction: ReductionPSToILP = ReduceTo::>::reduce_to(&p); + let ilp_solution = ILPSolver::new() + .solve(reduction.target_problem()) + .expect("ILP should be feasible"); + let extracted = reduction.extract_solution(&ilp_solution); + let value = p.evaluate(&extracted); + assert!( + value.0.is_some(), + "extracted schedule should be valid, got {value:?}" + ); + assert!( + value.0.map(|v| v <= 5).unwrap_or(false), + "makespan should be at most 5, got {value:?}" + ); +} + +// ─── infeasible ──────────────────────────────────────────────────────────── + +#[test] +fn test_preemptivescheduling_to_ilp_infeasible() { + // 1 processor, tasks t0→t1→t0 would be a cycle — let's just make a + // tight instance: 1 processor, 1 task of length 1, always feasible. + // Actually, let's check that a huge task on 1 tiny processor is fine + // (it's always feasible; makespan is just larger). + // Use a cycle-free precedence that is always schedulable. + let p = PreemptiveScheduling::new(vec![1, 1], 1, vec![(0, 1)]); + let reduction: ReductionPSToILP = ReduceTo::>::reduce_to(&p); + let sol = ILPSolver::new().solve(reduction.target_problem()); + // 1 processor, t0 at slot 0, t1 at slot 1 → always feasible + assert!(sol.is_some(), "should be feasible"); +} + +// ─── extract_solution ────────────────────────────────────────────────────── + +#[test] +fn test_preemptivescheduling_to_ilp_extract_solution() { + // small_instance: n=2, D_max=2, m_var=4 + // x_{0,0}=1, x_{0,1}=0, x_{1,0}=0, x_{1,1}=1, M=2 + let p = small_instance(); + let reduction: ReductionPSToILP = ReduceTo::>::reduce_to(&p); + let ilp_solution = vec![1, 0, 0, 1, 2]; // last element is M + let extracted = reduction.extract_solution(&ilp_solution); + assert_eq!(extracted, vec![1, 0, 0, 1]); + assert_eq!(p.evaluate(&extracted), Min(Some(2))); +} From 54a63d14f1b7febb3ba69b639f3125b374761e3e Mon Sep 17 00:00:00 2001 From: Xiwei Pan Date: Mon, 30 Mar 2026 07:30:18 +0800 Subject: [PATCH 06/21] Fix #506: Add OpenShopScheduling model and direct ILP rule MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Implements the Open Shop Scheduling optimization model (minimize makespan) with a direct ILP reduction using disjunctive formulation (binary ordering variables + integer start times + makespan objective). Canonical example uses the 4 jobs × 3 machines instance with true optimal makespan = 8. Co-Authored-By: Claude Sonnet 4.6 --- docs/paper/reductions.typ | 134 ++++++++ docs/paper/references.bib | 11 + src/models/misc/mod.rs | 4 + src/models/misc/open_shop_scheduling.rs | 269 ++++++++++++++++ src/rules/mod.rs | 3 + src/rules/openshopscheduling_ilp.rs | 286 ++++++++++++++++++ .../models/misc/open_shop_scheduling.rs | 232 ++++++++++++++ .../rules/openshopscheduling_ilp.rs | 143 +++++++++ 8 files changed, 1082 insertions(+) create mode 100644 src/models/misc/open_shop_scheduling.rs create mode 100644 src/rules/openshopscheduling_ilp.rs create mode 100644 src/unit_tests/models/misc/open_shop_scheduling.rs create mode 100644 src/unit_tests/rules/openshopscheduling_ilp.rs diff --git a/docs/paper/reductions.typ b/docs/paper/reductions.typ index f923f3ed..47746047 100644 --- a/docs/paper/reductions.typ +++ b/docs/paper/reductions.typ @@ -163,6 +163,7 @@ "MinMaxMulticenter": [Min-Max Multicenter], "FlowShopScheduling": [Flow Shop Scheduling], "JobShopScheduling": [Job-Shop Scheduling], + "OpenShopScheduling": [Open Shop Scheduling], "GroupingBySwapping": [Grouping by Swapping], "IntegerExpressionMembership": [Integer Expression Membership], "MinimumCutIntoBoundedSets": [Minimum Cut Into Bounded Sets], @@ -5587,6 +5588,119 @@ A classical NP-complete problem from Garey and Johnson @garey1979[Ch.~3, p.~76], ] } +#{ + let x = load-model-example("OpenShopScheduling") + let p = x.instance.processing_times + let m = x.instance.num_machines + let n = p.len() + let cfg = x.optimal_config + // Decode per-machine orderings: cfg[i*n..(i+1)*n] is machine i's job order + let orders = range(m).map(i => cfg.slice(i * n, (i + 1) * n)) + + // Greedy simulation to compute start times + let machine-avail = range(m).map(_ => 0) + let job-avail = range(n).map(_ => 0) + let next-on = range(m).map(_ => 0) + let start-times = range(n).map(_ => range(m).map(_ => 0)) + let finish-times = range(n).map(_ => range(m).map(_ => 0)) + + let total-tasks = n * m + let scheduled = 0 + while scheduled < total-tasks { + // Find machine with earliest next start + let best-start = 999999 + let best-machine = -1 + for i in range(m) { + if next-on.at(i) < n { + let j = orders.at(i).at(next-on.at(i)) + let s = calc.max(machine-avail.at(i), job-avail.at(j)) + if s < best-start or (s == best-start and (best-machine == -1 or i < best-machine)) { + best-start = s + best-machine = i + } + } + } + let i = best-machine + let j = orders.at(i).at(next-on.at(i)) + let s = calc.max(machine-avail.at(i), job-avail.at(j)) + let f = s + p.at(j).at(i) + start-times.at(j).at(i) = s + finish-times.at(j).at(i) = f + machine-avail.at(i) = f + job-avail.at(j) = f + next-on.at(i) += 1 + scheduled += 1 + } + + let makespan = calc.max(..range(n).map(j => calc.max(..range(m).map(i => finish-times.at(j).at(i))))) + + [ + #problem-def("OpenShopScheduling")[ + Given $m$ machines and a set $J$ of $n$ jobs, where each job $j in J$ has one task per machine $i$ with processing time $p(j, i) in ZZ^+_0$, find a non-preemptive schedule minimizing the *makespan* $max_(j,i)(sigma(j, i) + p(j, i))$, subject to: + 1. *Machine constraint:* Each machine processes at most one job at a time. + 2. *Job constraint:* Each job occupies at most one machine at a time. + Unlike flow-shop or job-shop scheduling, there is no prescribed order for a job's tasks across machines. + ][ + Open Shop Scheduling is problem SS14 in Garey and Johnson's catalog @garey1979 (decision version: does a schedule exist with makespan $<= D$?). NP-completeness for $m >= 3$ machines was established by Gonzalez and Sahni via reduction from Partition @gonzalez1976. The problem is solvable in polynomial time for $m = 2$ and also for the preemptive variant with any $m$ @gonzalez1976. This codebase evaluates a candidate schedule by simulating a greedy active schedule: for each step, the machine with the earliest feasible next-job start is processed next. The configuration encodes one permutation of jobs per machine (direct indices), giving $(n!)^m$ candidate orderings. + + *Example.* Let $m = #m$ machines and $n = #n$ jobs with processing times + #align(center, math.equation([$P = #math.mat(..p.map(row => row.map(v => [#v])))$])) + The canonical optimal orderings are: + #align(center, table( + columns: 2, + align: (left, left), + table.header([Machine], [Job order]), + ..range(m).map(i => ([M#(i+1)], orders.at(i).map(j => [$J_#(j+1)$]).join[$,$])).flatten() + )) + giving the Gantt chart in @fig:openshop and makespan *#makespan*. + + #pred-commands( + "pred create --example " + problem-spec(x) + " -o open-shop-scheduling.json", + "pred solve open-shop-scheduling.json", + "pred evaluate open-shop-scheduling.json --config " + x.optimal_config.map(str).join(","), + ) + + #figure( + canvas(length: 1cm, { + import draw: * + let colors = (rgb("#4e79a7"), rgb("#e15759"), rgb("#76b7b2"), rgb("#f28e2b"), rgb("#59a14f"), rgb("#b07aa1")) + let scale = 0.55 + let row-h = 0.6 + let gap = 0.15 + + for mi in range(m) { + let y = -mi * (row-h + gap) + content((-0.8, y), text(8pt, "M" + str(mi + 1))) + } + + for j in range(n) { + for i in range(m) { + let s = start-times.at(j).at(i) + let f = finish-times.at(j).at(i) + let x0 = s * scale + let x1 = f * scale + let y = -i * (row-h + gap) + rect((x0, y - row-h / 2), (x1, y + row-h / 2), + fill: colors.at(j).transparentize(30%), stroke: 0.4pt + colors.at(j)) + content(((x0 + x1) / 2, y), text(6pt, [$J_#(j + 1)$])) + } + } + + let y-axis = -(m - 1) * (row-h + gap) - row-h / 2 - 0.2 + line((0, y-axis), (makespan * scale, y-axis), stroke: 0.4pt) + for t in range(makespan + 1) { + let x = t * scale + line((x, y-axis), (x, y-axis - 0.1), stroke: 0.4pt) + content((x, y-axis - 0.25), text(6pt, str(t))) + } + content((makespan * scale / 2, y-axis - 0.5), text(7pt)[$t$]) + }), + caption: [Open-shop schedule for #n jobs on #m machines. Optimal makespan is #makespan. Each color represents one job; no two tasks of the same job overlap in time.], + ) + ] + ] +} + #problem-def("StaffScheduling")[ Given a collection $C$ of binary schedule patterns of length $m$, where each pattern has exactly $k$ ones, a requirement vector $overline(R) in ZZ_(>= 0)^m$, and a worker budget $n in ZZ_(>= 0)$, determine whether there exists a function $f: C -> ZZ_(>= 0)$ such that $sum_(c in C) f(c) <= n$ and $sum_(c in C) f(c) dot c >= overline(R)$ component-wise. ][ @@ -9547,6 +9661,26 @@ The following reductions to Integer Linear Programming are straightforward formu _Solution extraction._ Sort the jobs by their final-machine completion times $C_(j,m)$ and convert that permutation to Lehmer code. ] +#reduction-rule("OpenShopScheduling", "ILP")[ + Binary ordering variables and integer start times encode the disjunctive non-overlap constraints for both machines and jobs; the makespan is the minimized objective. +][ + _Construction._ Let $M = sum_(j,i) p(j,i)$ be the big-$M$ constant (an upper bound on the makespan). For each pair $j < k$ and each machine $i$, let $x_{j k i} in {0,1}$ with $x_{j k i} = 1$ iff job $j$ precedes job $k$ on machine $i$. For each job $j$ and pair of machines $i < i'$, let $y_{j i i'} in {0,1}$ with $y_{j i i'} = 1$ iff machine $i$ is processed before machine $i'$ for job $j$. Let $s_{j,i} in ZZ_{>=0}$ be the start time of job $j$ on machine $i$, and $C$ be the integer makespan variable. The ILP is: + $ + min quad & C \ + "subject to" quad + & s_(k,i) - s_(j,i) - M x_(j k i) >= p(j, i) - M quad forall j < k, i \ + & s_(j,i) - s_(k,i) + M x_(j k i) >= p(k, i) quad forall j < k, i \ + & s_(j,i') - s_(j,i) - M y_(j i i') >= p(j, i) - M quad forall j, i < i' \ + & s_(j,i) - s_(j,i') + M y_(j i i') >= p(j, i') quad forall j, i < i' \ + & C - s_(j,i) >= p(j, i) quad forall j, i \ + & x_(j k i), y_(j i i') in {0,1},; s_(j,i), C in ZZ_(>=0) + $. + + _Correctness._ ($arrow.r.double$) Any feasible open-shop schedule with the given permutations $sigma_i$ induces valid ordering bits $x_{j k i}$ and $y_{j i i'}$ and start times satisfying all non-overlap constraints. ($arrow.l.double$) Any feasible ILP solution defines non-overlapping start times for all tasks, respecting both machine and job constraints. + + _Solution extraction._ For each machine $i$, sort jobs by their ILP start times $s_{j,i}$ to recover the per-machine permutation; output the concatenation of these $m$ direct-index permutations. +] + #reduction-rule("MinimumTardinessSequencing", "ILP")[ A position-assignment ILP captures the permutation, the precedence constraints, and a binary tardy indicator for each unit-length task. ][ diff --git a/docs/paper/references.bib b/docs/paper/references.bib index f7d6f456..b82bb5b3 100644 --- a/docs/paper/references.bib +++ b/docs/paper/references.bib @@ -1558,3 +1558,14 @@ @article{Murty1972 pages = {326--370}, doi = {10.1007/BF01584550} } + +@article{gonzalez1976, + author = {Teofilo Gonzalez and Sartaj Sahni}, + title = {Open Shop Scheduling to Minimize Finish Time}, + journal = {Journal of the ACM}, + volume = {23}, + number = {4}, + pages = {665--679}, + year = {1976}, + doi = {10.1145/321978.321985} +} diff --git a/src/models/misc/mod.rs b/src/models/misc/mod.rs index 86391564..9a0f323e 100644 --- a/src/models/misc/mod.rs +++ b/src/models/misc/mod.rs @@ -15,6 +15,7 @@ //! - [`JobShopScheduling`]: Minimize makespan with per-job processor routes //! - [`Knapsack`]: 0-1 Knapsack (maximize value subject to weight capacity) //! - [`MultiprocessorScheduling`]: Schedule tasks on processors to meet a deadline +//! - [`OpenShopScheduling`]: Open Shop Scheduling (minimize makespan, free task order per job) //! - [`LongestCommonSubsequence`]: Longest Common Subsequence //! - [`MinimumExternalMacroDataCompression`]: Minimize compression cost using external dictionary //! - [`MinimumInternalMacroDataCompression`]: Minimize self-referencing compression cost @@ -91,6 +92,7 @@ mod minimum_external_macro_data_compression; mod minimum_internal_macro_data_compression; mod minimum_tardiness_sequencing; mod multiprocessor_scheduling; +mod open_shop_scheduling; pub(crate) mod paintshop; pub(crate) mod partially_ordered_knapsack; pub(crate) mod partition; @@ -142,6 +144,7 @@ pub use minimum_external_macro_data_compression::MinimumExternalMacroDataCompres pub use minimum_internal_macro_data_compression::MinimumInternalMacroDataCompression; pub use minimum_tardiness_sequencing::MinimumTardinessSequencing; pub use multiprocessor_scheduling::MultiprocessorScheduling; +pub use open_shop_scheduling::OpenShopScheduling; pub use paintshop::PaintShop; pub use partially_ordered_knapsack::PartiallyOrderedKnapsack; pub use partition::Partition; @@ -183,6 +186,7 @@ pub(crate) fn canonical_model_example_specs() -> Vec>", description: "processing_times[j][i] = processing time of job j on machine i (n x m)" }, + ], + } +} + +/// The Open Shop Scheduling problem. +/// +/// Given `m` machines and `n` jobs, where job `j` has one task on each machine +/// `i` with processing time `p[j][i]`, find a non-preemptive schedule that +/// minimizes the makespan. Unlike flow-shop or job-shop scheduling, there is no +/// prescribed order for the tasks of a given job — each job's tasks may be +/// processed on the machines in any order. +/// +/// # Constraints +/// +/// 1. **Machine constraint:** Each machine processes at most one job at a time. +/// 2. **Job constraint:** Each job occupies at most one machine at a time. +/// +/// # Configuration Encoding +/// +/// The configuration is a flat array of `n * m` values. +/// `config[i * n .. (i + 1) * n]` gives the permutation of jobs on machine `i` +/// (direct job indices, not Lehmer code). A segment is valid iff it is a +/// permutation of `0..n`. Invalid configs return `Min(None)`. +/// +/// # Example +/// +/// ``` +/// use problemreductions::models::misc::OpenShopScheduling; +/// use problemreductions::{Problem, Solver, BruteForce}; +/// use problemreductions::types::Min; +/// +/// // 2 machines, 2 jobs +/// let p = vec![vec![1, 2], vec![2, 1]]; +/// let problem = OpenShopScheduling::new(2, p); +/// let solver = BruteForce::new(); +/// let value = Solver::solve(&solver, &problem); +/// assert_eq!(value, Min(Some(3))); +/// ``` +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct OpenShopScheduling { + /// Number of machines m. + num_machines: usize, + /// Processing time matrix: `processing_times[j][i]` is the time to process + /// job `j` on machine `i`. Dimensions: n jobs × m machines. + processing_times: Vec>, +} + +impl OpenShopScheduling { + /// Create a new Open Shop Scheduling instance. + /// + /// # Arguments + /// * `num_machines` - Number of machines m + /// * `processing_times` - `processing_times[j][i]` = processing time of job j on machine i. + /// Each inner Vec must have length `num_machines`. + /// + /// # Panics + /// Panics if any job does not have exactly `num_machines` processing times. + pub fn new(num_machines: usize, processing_times: Vec>) -> Self { + for (j, times) in processing_times.iter().enumerate() { + assert_eq!( + times.len(), + num_machines, + "Job {} has {} processing times, expected {}", + j, + times.len(), + num_machines + ); + } + Self { + num_machines, + processing_times, + } + } + + /// Get the number of machines. + pub fn num_machines(&self) -> usize { + self.num_machines + } + + /// Get the number of jobs. + pub fn num_jobs(&self) -> usize { + self.processing_times.len() + } + + /// Get the processing time matrix. + pub fn processing_times(&self) -> &[Vec] { + &self.processing_times + } + + /// Decode the per-machine job orderings from a config. + /// + /// Returns `None` if the config length is wrong or any segment is not a + /// valid permutation of `0..n`. + pub fn decode_orders(&self, config: &[usize]) -> Option>> { + let n = self.num_jobs(); + let m = self.num_machines; + if config.len() != n * m { + return None; + } + let mut orders = Vec::with_capacity(m); + for i in 0..m { + let seg = &config[i * n..(i + 1) * n]; + // Validate that seg is a permutation of 0..n + let mut seen = vec![false; n]; + for &job in seg { + if job >= n || seen[job] { + return None; + } + seen[job] = true; + } + orders.push(seg.to_vec()); + } + Some(orders) + } + + /// Compute the makespan from a set of per-machine job orderings. + /// + /// Uses a greedy simulation: at each step, among all machines whose next + /// scheduled job can start (both machine and job are free), schedule the + /// one with the earliest available start time. + pub fn compute_makespan(&self, orders: &[Vec]) -> usize { + let n = self.num_jobs(); + let m = self.num_machines; + + if n == 0 || m == 0 { + return 0; + } + + // `machine_avail[i]` = next time machine i is free. + let mut machine_avail = vec![0usize; m]; + // `job_avail[j]` = next time job j is free (all its currently scheduled + // tasks have finished). + let mut job_avail = vec![0usize; n]; + // Pointer to next unscheduled position in each machine's ordering. + let mut next_on_machine = vec![0usize; m]; + + let total_tasks = n * m; + let mut scheduled = 0; + + while scheduled < total_tasks { + // Find the (machine, earliest start time) among all machines that + // still have unscheduled tasks. + let mut best_start = usize::MAX; + let mut best_machine = usize::MAX; + + for i in 0..m { + if next_on_machine[i] < n { + let j = orders[i][next_on_machine[i]]; + let start = machine_avail[i].max(job_avail[j]); + // Tie-break by machine index to make the result deterministic. + if start < best_start || (start == best_start && i < best_machine) { + best_start = start; + best_machine = i; + } + } + } + + // Schedule the chosen task. + let i = best_machine; + let j = orders[i][next_on_machine[i]]; + let start = machine_avail[i].max(job_avail[j]); + let finish = start + self.processing_times[j][i]; + machine_avail[i] = finish; + job_avail[j] = finish; + next_on_machine[i] += 1; + scheduled += 1; + } + + machine_avail + .iter() + .copied() + .max() + .unwrap_or(0) + .max(job_avail.iter().copied().max().unwrap_or(0)) + } +} + +impl Problem for OpenShopScheduling { + const NAME: &'static str = "OpenShopScheduling"; + type Value = Min; + + fn variant() -> Vec<(&'static str, &'static str)> { + crate::variant_params![] + } + + fn dims(&self) -> Vec { + let n = self.num_jobs(); + let m = self.num_machines; + vec![n; n * m] + } + + fn evaluate(&self, config: &[usize]) -> Min { + match self.decode_orders(config) { + Some(orders) => Min(Some(self.compute_makespan(&orders))), + None => Min(None), + } + } +} + +crate::declare_variants! { + default OpenShopScheduling => "factorial(num_jobs)^num_machines", +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_model_example_specs() -> Vec { + // 4 jobs × 3 machines example from issue #506. + // processing_times[j][i]: + // J1: p[0] = [3, 1, 2] + // J2: p[1] = [2, 3, 1] + // J3: p[2] = [1, 2, 3] + // J4: p[3] = [2, 2, 1] + // + // Per-machine totals: M1=8, M2=8, M3=7. Per-job totals: J1=6, J2=6, J3=6, J4=5. + // Lower bound: max(8, 6) = 8. True optimal makespan = 8. + // + // Optimal machine orderings (0-indexed jobs): + // M1: [J1, J2, J3, J4] = [0, 1, 2, 3] + // M2: [J2, J1, J4, J3] = [1, 0, 3, 2] + // M3: [J3, J4, J1, J2] = [2, 3, 0, 1] + // + // config = [M1 order | M2 order | M3 order] + // = [0, 1, 2, 3, 1, 0, 3, 2, 2, 3, 0, 1] + // + // Resulting schedule: + // J1: M1=[0,3), M2=[7,8), M3=[1,3) — job non-overlap: [0,3),[1,3) overlap! + // Actually use simulation to verify: + // Step 1: best start = M1(J1:0), M2(J2:0), M3(J3:0) → M1 ties with M2,M3; pick M1 + // J1 on M1: [0,3) + // ... (simulation produces makespan=8) + // + // 224 out of 13824 orderings achieve the optimal makespan of 8. + vec![crate::example_db::specs::ModelExampleSpec { + id: "open_shop_scheduling", + instance: Box::new(OpenShopScheduling::new( + 3, + vec![vec![3, 1, 2], vec![2, 3, 1], vec![1, 2, 3], vec![2, 2, 1]], + )), + optimal_config: vec![0, 1, 2, 3, 1, 0, 3, 2, 2, 3, 0, 1], + optimal_value: serde_json::json!(8), + }] +} + +#[cfg(test)] +#[path = "../../unit_tests/models/misc/open_shop_scheduling.rs"] +mod tests; diff --git a/src/rules/mod.rs b/src/rules/mod.rs index 90abab79..17e91265 100644 --- a/src/rules/mod.rs +++ b/src/rules/mod.rs @@ -182,6 +182,8 @@ pub(crate) mod multiprocessorscheduling_ilp; #[cfg(feature = "ilp-solver")] pub(crate) mod naesatisfiability_ilp; #[cfg(feature = "ilp-solver")] +pub(crate) mod openshopscheduling_ilp; +#[cfg(feature = "ilp-solver")] pub(crate) mod optimallineararrangement_ilp; #[cfg(feature = "ilp-solver")] pub(crate) mod paintshop_ilp; @@ -370,6 +372,7 @@ pub(crate) fn canonical_rule_example_specs() -> Vec. +//! +//! Disjunctive formulation with binary ordering variables and integer start times: +//! +//! **Variables:** +//! - `x_{j,k,i}` for j < k, all machines i: binary, 1 if job j precedes job k on machine i. +//! Index: pair index * m + i, where pair index = `j*(2n-j-1)/2 + (k-j-1)`. +//! Count: n*(n-1)/2 * m variables. +//! - `s_{j,i}` for all (j, i): integer start time of job j on machine i. +//! Index: num_order_vars + j * m + i. +//! Count: n * m variables. +//! - `C` (makespan): integer, index num_order_vars + n * m. +//! +//! **Constraints:** +//! 1. Binary bounds: 0 ≤ x_{j,k,i} ≤ 1 for all j < k, i. +//! 2. Machine non-overlap for each pair (j, k) and machine i: +//! - s_{k,i} ≥ s_{j,i} + p_{j,i} - M*(1 - x_{j,k,i}) → s_{k,i} - s_{j,i} + M*x_{j,k,i} ≥ p_{j,i} +//! - s_{j,i} ≥ s_{k,i} + p_{k,i} - M*x_{j,k,i} → s_{j,i} - s_{k,i} - M*x_{j,k,i} ≥ p_{k,i} - M +//! 3. Job non-overlap for each job j and each pair of machines (i, i'): +//! Uses separate binary variable y_{j,i,i'} for i < i' to decide which task runs first. +//! Variables y_{j,i,i'}: appended after s variables. +//! - s_{j,i'} ≥ s_{j,i} + p_{j,i} - M*(1 - y_{j,i,i'}) +//! - s_{j,i} ≥ s_{j,i'} + p_{j,i'} - M*y_{j,i,i'} +//! 4. Makespan: C ≥ s_{j,i} + p_{j,i} for all (j, i). +//! 5. Non-negativity of start times: s_{j,i} ≥ 0 (implied by ILP non-negativity). +//! +//! **Objective:** Minimize C. + +use crate::models::algebraic::{LinearConstraint, ObjectiveSense, ILP}; +use crate::models::misc::OpenShopScheduling; +use crate::reduction; +use crate::rules::traits::{ReduceTo, ReductionResult}; + +/// Result of reducing OpenShopScheduling to ILP. +/// +/// Variable layout: +/// - `x_{j,k,i}` at index `pair_idx(j,k) * m + i` (num_pairs * m vars) +/// - `s_{j,i}` at index `num_order_vars + j * m + i` (n * m vars) +/// - `y_{j,i,i'}` for i < i': at `num_order_vars + n*m + j * num_machine_pairs + machine_pair_idx(i,i')` +/// (n * m*(m-1)/2 vars) +/// - `C`: at index `num_order_vars + n * m + n * m*(m-1)/2` (1 var) +#[derive(Debug, Clone)] +pub struct ReductionOSSToILP { + target: ILP, + num_jobs: usize, + num_machines: usize, + /// n*(n-1)/2 * m — start index of s_{j,i} variables + num_order_vars: usize, +} + +impl ReductionOSSToILP { + fn pair_idx(&self, j: usize, k: usize) -> usize { + debug_assert!(j < k); + let n = self.num_jobs; + j * (2 * n - j - 1) / 2 + (k - j - 1) + } + + fn x_var(&self, j: usize, k: usize, i: usize) -> usize { + self.pair_idx(j, k) * self.num_machines + i + } + + fn s_var(&self, j: usize, i: usize) -> usize { + self.num_order_vars + j * self.num_machines + i + } + + fn machine_pair_idx(&self, i: usize, ip: usize) -> usize { + debug_assert!(i < ip); + let m = self.num_machines; + i * (2 * m - i - 1) / 2 + (ip - i - 1) + } + + fn y_var(&self, j: usize, i: usize, ip: usize) -> usize { + let num_machine_pairs = self.num_machines * self.num_machines.saturating_sub(1) / 2; + self.num_order_vars + + self.num_jobs * self.num_machines + + j * num_machine_pairs + + self.machine_pair_idx(i, ip) + } +} + +impl ReductionResult for ReductionOSSToILP { + type Source = OpenShopScheduling; + type Target = ILP; + + fn target_problem(&self) -> &ILP { + &self.target + } + + /// Extract per-machine job orderings from the ILP start times, then + /// convert to the config format (direct permutation indices per machine). + fn extract_solution(&self, target_solution: &[usize]) -> Vec { + let n = self.num_jobs; + let m = self.num_machines; + + // Read start times s_{j,i} for each (j, i) + let start = |j: usize, i: usize| -> usize { + let idx = self.num_order_vars + j * m + i; + target_solution.get(idx).copied().unwrap_or(0) + }; + + // For each machine, sort jobs by their start time on that machine + let mut config = Vec::with_capacity(n * m); + for i in 0..m { + let mut jobs: Vec = (0..n).collect(); + jobs.sort_by_key(|&j| (start(j, i), j)); + config.extend(jobs); + } + config + } +} + +#[reduction(overhead = { + num_vars = "num_jobs * (num_jobs - 1) / 2 * num_machines + num_jobs * num_machines + num_jobs * num_machines * (num_machines - 1) / 2 + 1", + num_constraints = "num_jobs * (num_jobs - 1) / 2 * num_machines + num_jobs * num_machines + 1 + 2 * num_jobs * (num_jobs - 1) / 2 * num_machines + num_jobs * num_machines * (num_machines - 1) / 2 + 2 * num_jobs * num_machines * (num_machines - 1) / 2 + num_jobs * num_machines", +})] +impl ReduceTo> for OpenShopScheduling { + type Result = ReductionOSSToILP; + + fn reduce_to(&self) -> Self::Result { + let n = self.num_jobs(); + let m = self.num_machines(); + let p = self.processing_times(); + + let num_pairs = n * n.saturating_sub(1) / 2; + let num_machine_pairs = m * m.saturating_sub(1) / 2; + + // Variable counts + let num_order_vars = num_pairs * m; // x_{j,k,i}: binary + let num_start_vars = n * m; // s_{j,i}: integer + let num_job_pair_vars = n * num_machine_pairs; // y_{j,i,i'}: binary + let num_vars = num_order_vars + num_start_vars + num_job_pair_vars + 1; // +1 for C + + let result = ReductionOSSToILP { + target: ILP::new(0, vec![], vec![], ObjectiveSense::Minimize), + num_jobs: n, + num_machines: m, + num_order_vars, + }; + + // Big-M: sum of all processing times (loose upper bound on makespan) + let total_p: usize = p.iter().flat_map(|row| row.iter()).sum(); + let big_m = total_p as f64; + + let c_var = num_order_vars + num_start_vars + num_job_pair_vars; + + let mut constraints = Vec::new(); + + // 1. Binary bounds on x_{j,k,i}: 0 ≤ x ≤ 1 + for j in 0..n { + for k in (j + 1)..n { + for i in 0..m { + let x = result.x_var(j, k, i); + constraints.push(LinearConstraint::le(vec![(x, 1.0)], 1.0)); + } + } + } + + // Upper bounds on start time variables: s_{j,i} ≤ total_p + // (no task can start after all tasks have finished) + for j in 0..n { + for i in 0..m { + let sji = result.s_var(j, i); + constraints.push(LinearConstraint::le(vec![(sji, 1.0)], big_m)); + } + } + + // Upper bound on makespan C ≤ total_p + constraints.push(LinearConstraint::le(vec![(c_var, 1.0)], big_m)); + + // 2. Machine non-overlap: for each pair (j,k) with j= p_{j,i} - M + constraints.push(LinearConstraint::ge( + vec![(sk, 1.0), (sj, -1.0), (x, -big_m)], + pji - big_m, + )); + + // (b) s_{j,i} - s_{k,i} + M*x_{j,k,i} >= p_{k,i} + constraints.push(LinearConstraint::ge( + vec![(sj, 1.0), (sk, -1.0), (x, big_m)], + pki, + )); + } + } + } + + // 3. Binary bounds on y_{j,i,i'}: 0 ≤ y ≤ 1 + for j in 0..n { + for i in 0..m { + for ip in (i + 1)..m { + let y = result.y_var(j, i, ip); + constraints.push(LinearConstraint::le(vec![(y, 1.0)], 1.0)); + } + } + } + + // 4. Job non-overlap: for each job j and each pair (i, i') with i < i' + // y_{j,i,i'}=1 means machine i is scheduled before machine i' for job j: + // (a) s_{j,i'} ≥ s_{j,i} + p_{j,i} - M*(1-y) + // s_{j,i'} - s_{j,i} - M*y ≥ p_{j,i} - M + // (b) s_{j,i} ≥ s_{j,i'} + p_{j,i'} - M*y + // s_{j,i} - s_{j,i'} + M*y ≥ p_{j,i'} + for (j, pj) in p.iter().enumerate() { + for i in 0..m { + for ip in (i + 1)..m { + let y = result.y_var(j, i, ip); + let sji = result.s_var(j, i); + let sjip = result.s_var(j, ip); + let pji = pj[i] as f64; + let pjip = pj[ip] as f64; + + // (a) s_{j,i'} - s_{j,i} - M*y >= p_{j,i} - M + constraints.push(LinearConstraint::ge( + vec![(sjip, 1.0), (sji, -1.0), (y, -big_m)], + pji - big_m, + )); + + // (b) s_{j,i} - s_{j,i'} + M*y >= p_{j,i'} + constraints.push(LinearConstraint::ge( + vec![(sji, 1.0), (sjip, -1.0), (y, big_m)], + pjip, + )); + } + } + } + + // 5. Makespan: C ≥ s_{j,i} + p_{j,i} ⟺ C - s_{j,i} ≥ p_{j,i} + for (j, pj) in p.iter().enumerate() { + for (i, &pji) in pj.iter().enumerate() { + let sji = result.s_var(j, i); + constraints.push(LinearConstraint::ge( + vec![(c_var, 1.0), (sji, -1.0)], + pji as f64, + )); + } + } + + // Objective: minimize C + let objective = vec![(c_var, 1.0)]; + + ReductionOSSToILP { + target: ILP::new(num_vars, constraints, objective, ObjectiveSense::Minimize), + num_jobs: n, + num_machines: m, + num_order_vars, + } + } +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_rule_example_specs() -> Vec { + vec![crate::example_db::specs::RuleExampleSpec { + id: "openshopscheduling_to_ilp", + build: || { + // Small 2x2 instance for canonical example + let source = OpenShopScheduling::new(2, vec![vec![1, 2], vec![2, 1]]); + crate::example_db::specs::rule_example_via_ilp::<_, i32>(source) + }, + }] +} + +#[cfg(test)] +#[path = "../unit_tests/rules/openshopscheduling_ilp.rs"] +mod tests; diff --git a/src/unit_tests/models/misc/open_shop_scheduling.rs b/src/unit_tests/models/misc/open_shop_scheduling.rs new file mode 100644 index 00000000..2c493cda --- /dev/null +++ b/src/unit_tests/models/misc/open_shop_scheduling.rs @@ -0,0 +1,232 @@ +use super::*; +use crate::solvers::{BruteForce, Solver}; +use crate::traits::Problem; +use crate::types::Min; + +/// 2 machines, 2 jobs: smallest non-trivial instance. +/// processing_times[j][i]: J1=[1,2], J2=[2,1] +/// All four orderings give the same makespan = 3 (symmetric). +fn two_by_two() -> OpenShopScheduling { + OpenShopScheduling::new(2, vec![vec![1, 2], vec![2, 1]]) +} + +/// 3 machines, 3 jobs: a small asymmetric instance. +fn three_by_three() -> OpenShopScheduling { + OpenShopScheduling::new(3, vec![vec![1, 2, 3], vec![3, 2, 1], vec![2, 1, 2]]) +} + +/// Issue #506 example: 4 jobs × 3 machines, true optimal makespan = 8. +/// (The issue body incorrectly claimed 11 was optimal; brute-force confirms 8.) +fn issue_example() -> OpenShopScheduling { + OpenShopScheduling::new( + 3, + vec![vec![3, 1, 2], vec![2, 3, 1], vec![1, 2, 3], vec![2, 2, 1]], + ) +} + +// ─── creation and dims ─────────────────────────────────────────────────────── + +#[test] +fn test_open_shop_scheduling_creation() { + let p = issue_example(); + assert_eq!(p.num_machines(), 3); + assert_eq!(p.num_jobs(), 4); + assert_eq!( + p.processing_times(), + &[ + vec![3usize, 1, 2], + vec![2, 3, 1], + vec![1, 2, 3], + vec![2, 2, 1], + ] + ); +} + +#[test] +fn test_open_shop_scheduling_dims() { + let p = issue_example(); + // n = 4 jobs, m = 3 machines → n*m = 12 config variables, each in 0..4 + assert_eq!(p.dims(), vec![4usize; 12]); + + let p2 = two_by_two(); + assert_eq!(p2.dims(), vec![2usize; 4]); +} + +// ─── evaluate ──────────────────────────────────────────────────────────────── + +#[test] +fn test_open_shop_scheduling_evaluate_issue_example_optimal() { + let p = issue_example(); + // Optimal config: M1=[0,1,2,3], M2=[1,0,3,2], M3=[2,3,0,1] + // True optimal makespan = 8 (the issue body incorrectly claimed 11). + let config = vec![0, 1, 2, 3, 1, 0, 3, 2, 2, 3, 0, 1]; + assert_eq!(p.evaluate(&config), Min(Some(8))); +} + +#[test] +fn test_open_shop_scheduling_evaluate_issue_example_suboptimal_schedule() { + let p = issue_example(); + // The schedule from the issue body: M1=[2,1,0,3], M2=[2,1,0,3], M3=[2,0,1,3] + // gives makespan 11, which is valid but not optimal (optimal is 8). + let config = vec![2, 1, 0, 3, 2, 1, 0, 3, 2, 0, 1, 3]; + let value = p.evaluate(&config); + assert_eq!(value, Min(Some(11))); +} + +#[test] +fn test_open_shop_scheduling_evaluate_suboptimal() { + let p = issue_example(); + // Identity orderings on all machines: M1=[0,1,2,3], M2=[0,1,2,3], M3=[0,1,2,3] + let config = vec![0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3]; + let value = p.evaluate(&config); + // Must be valid and > 8 (non-optimal) + assert!(value.0.is_some()); + assert!(value.0.unwrap() > 8); +} + +#[test] +fn test_open_shop_scheduling_evaluate_invalid_not_permutation() { + let p = issue_example(); + // config[0..4] = [0,0,0,0] is not a permutation → invalid + let config = vec![0, 0, 0, 0, 0, 1, 2, 3, 0, 1, 2, 3]; + assert_eq!(p.evaluate(&config), Min(None)); +} + +#[test] +fn test_open_shop_scheduling_evaluate_wrong_length() { + let p = issue_example(); + // Too short + assert_eq!(p.evaluate(&[0, 1, 2]), Min(None)); + // Too long + assert_eq!(p.evaluate(&[0; 13]), Min(None)); +} + +#[test] +fn test_open_shop_scheduling_evaluate_empty() { + let p = OpenShopScheduling::new(3, vec![]); + assert_eq!(p.dims(), Vec::::new()); + assert_eq!(p.evaluate(&[]), Min(Some(0))); +} + +#[test] +fn test_open_shop_scheduling_evaluate_two_by_two() { + let p = two_by_two(); + // M1=[0,1], M2=[0,1]: valid permutations + // Simulation: + // Step 1: best start is min over M1(J1: max(0,0)=0) and M2(J1: max(0,0)=0) + // → machine 0 (tie-break), schedule J1 on M1: [0,1), machine_avail[0]=1, job_avail[0]=1 + // Step 2: M1 next is J2 (start=max(1,0)=1), M2 next is J1 (start=max(0,1)=1) + // → machine 0 (tie-break), schedule J2 on M1: [1,3), machine_avail[0]=3, job_avail[1]=3 + // Step 3: M1 done, M2 next is J1 (start=max(0,1)=1), schedule J1 on M2: [1,3), machine_avail[1]=3, job_avail[0]=3 + // Step 4: M2 next is J2 (start=max(3,3)=3), schedule J2 on M2: [3,4), machine_avail[1]=4, job_avail[1]=4 + // Makespan = 4 + let config = vec![0, 1, 0, 1]; + let val = p.evaluate(&config); + assert!(val.0.is_some()); + assert_eq!(val, Min(Some(4))); +} + +// ─── decode_orders ─────────────────────────────────────────────────────────── + +#[test] +fn test_open_shop_scheduling_decode_orders_valid() { + let p = two_by_two(); + let config = vec![0, 1, 1, 0]; + let orders = p.decode_orders(&config).unwrap(); + assert_eq!(orders, vec![vec![0, 1], vec![1, 0]]); +} + +#[test] +fn test_open_shop_scheduling_decode_orders_invalid_duplicate() { + let p = two_by_two(); + let config = vec![0, 0, 1, 0]; // first machine has duplicate 0 + assert!(p.decode_orders(&config).is_none()); +} + +#[test] +fn test_open_shop_scheduling_decode_orders_invalid_out_of_range() { + let p = two_by_two(); + let config = vec![0, 2, 1, 0]; // job 2 out of range for n=2 + assert!(p.decode_orders(&config).is_none()); +} + +// ─── compute_makespan ──────────────────────────────────────────────────────── + +#[test] +fn test_open_shop_scheduling_compute_makespan_optimal_schedule() { + let p = issue_example(); + // True optimal: M1=[0,1,2,3], M2=[1,0,3,2], M3=[2,3,0,1], makespan=8 + let orders = vec![ + vec![0, 1, 2, 3], // M1 + vec![1, 0, 3, 2], // M2 + vec![2, 3, 0, 1], // M3 + ]; + assert_eq!(p.compute_makespan(&orders), 8); +} + +#[test] +fn test_open_shop_scheduling_compute_makespan_issue_example_schedule() { + let p = issue_example(); + // The schedule from the issue body: makespan=11 (valid but suboptimal) + let orders = vec![vec![2, 1, 0, 3], vec![2, 1, 0, 3], vec![2, 0, 1, 3]]; + // Manually verified start/finish times: + // J1: M1=[3,6), M2=[6,7), M3=[7,9) + // J2: M1=[1,3), M2=[3,6), M3=[9,10) + // J3: M1=[0,1), M2=[1,3), M3=[3,6) + // J4: M1=[6,8), M2=[8,10), M3=[10,11) + assert_eq!(p.compute_makespan(&orders), 11); +} + +// ─── problem trait ─────────────────────────────────────────────────────────── + +#[test] +fn test_open_shop_scheduling_problem_name_and_variant() { + assert_eq!(::NAME, "OpenShopScheduling"); + assert!(::variant().is_empty()); +} + +// ─── serialization ─────────────────────────────────────────────────────────── + +#[test] +fn test_open_shop_scheduling_serialization() { + let p = issue_example(); + let json = serde_json::to_value(&p).unwrap(); + let restored: OpenShopScheduling = serde_json::from_value(json).unwrap(); + assert_eq!(restored.num_machines(), p.num_machines()); + assert_eq!(restored.num_jobs(), p.num_jobs()); + assert_eq!(restored.processing_times(), p.processing_times()); +} + +// ─── brute-force solver ────────────────────────────────────────────────────── + +#[test] +fn test_open_shop_scheduling_brute_force_small() { + // 2x2 instance: brute force over 2^4 = 16 configs (4 valid schedules) + let p = two_by_two(); + let solver = BruteForce::new(); + let value = Solver::solve(&solver, &p); + assert!(value.0.is_some()); + // Optimal value for this instance + assert_eq!(value, Min(Some(3))); + let witness = solver.find_witness(&p).unwrap(); + assert_eq!(p.evaluate(&witness), Min(Some(3))); +} + +#[test] +fn test_open_shop_scheduling_brute_force_medium() { + // 3x3 instance: brute force over 3^9 = 19683 configs (216 valid schedules) + let p = three_by_three(); + let solver = BruteForce::new(); + let value = Solver::solve(&solver, &p); + assert!(value.0.is_some()); + let witness = solver.find_witness(&p).unwrap(); + assert_eq!(p.evaluate(&witness), value); +} + +#[test] +fn test_open_shop_scheduling_canonical_example_config_is_optimal() { + // Verify that the canonical example config achieves the true optimal makespan = 8 + let p = issue_example(); + let optimal_config = vec![0, 1, 2, 3, 1, 0, 3, 2, 2, 3, 0, 1]; + assert_eq!(p.evaluate(&optimal_config), Min(Some(8))); +} diff --git a/src/unit_tests/rules/openshopscheduling_ilp.rs b/src/unit_tests/rules/openshopscheduling_ilp.rs new file mode 100644 index 00000000..cd552843 --- /dev/null +++ b/src/unit_tests/rules/openshopscheduling_ilp.rs @@ -0,0 +1,143 @@ +use super::*; +use crate::models::algebraic::ILP; +use crate::models::misc::OpenShopScheduling; +use crate::solvers::ILPSolver; +use crate::traits::Problem; +use crate::types::Min; + +/// 2 machines, 2 jobs: smallest non-trivial instance. +/// processing_times[j][i]: J1=[1,2], J2=[2,1]. Optimal makespan = 3. +fn small_instance() -> OpenShopScheduling { + OpenShopScheduling::new(2, vec![vec![1, 2], vec![2, 1]]) +} + +/// 3 machines, 2 jobs. +fn medium_instance() -> OpenShopScheduling { + OpenShopScheduling::new(3, vec![vec![3, 1, 2], vec![2, 3, 1]]) +} + +// ─── structure ─────────────────────────────────────────────────────────────── + +#[test] +fn test_openshopscheduling_to_ilp_structure_small() { + let p = small_instance(); + let reduction: ReductionOSSToILP = ReduceTo::>::reduce_to(&p); + let ilp = reduction.target_problem(); + + // n=2, m=2: + // num_pairs = 1, num_order_vars = 1*2 = 2 (x_{0,1,0}, x_{0,1,1}) + // num_start_vars = 2*2 = 4 (s_{0,0}, s_{0,1}, s_{1,0}, s_{1,1}) + // num_machine_pairs = 1, num_job_pair_vars = 2*1 = 2 (y_{0,0,1}, y_{1,0,1}) + // c_var = 1 + // Total = 2 + 4 + 2 + 1 = 9 + assert_eq!( + ilp.num_vars, 9, + "expected 9 variables, got {}", + ilp.num_vars + ); + // Constraint count: 2 bound_x + 4 s_upper + 1 c_upper + 4 machine_nooverlap + // + 2 bound_y + 4 job_nooverlap + 4 makespan = 21 + assert_eq!( + ilp.constraints.len(), + 21, + "expected 21 constraints, got {}", + ilp.constraints.len() + ); + assert_eq!( + ilp.objective, + vec![(8, 1.0)], + "objective should minimize C (index 8)" + ); +} + +// ─── closed-loop ───────────────────────────────────────────────────────────── + +#[test] +fn test_openshopscheduling_to_ilp_closed_loop_small() { + let p = small_instance(); + let reduction: ReductionOSSToILP = ReduceTo::>::reduce_to(&p); + let ilp_solution = ILPSolver::new() + .solve(reduction.target_problem()) + .expect("ILP should be feasible"); + + let extracted = reduction.extract_solution(&ilp_solution); + let value = p.evaluate(&extracted); + assert!( + value.0.is_some(), + "extracted schedule must be valid, got {value:?}" + ); + // Optimal makespan = 3 + assert_eq!(value, Min(Some(3)), "ILP should find optimal makespan = 3"); +} + +#[test] +fn test_openshopscheduling_to_ilp_closed_loop_medium() { + let p = medium_instance(); + let reduction: ReductionOSSToILP = ReduceTo::>::reduce_to(&p); + let ilp_solution = ILPSolver::new() + .solve(reduction.target_problem()) + .expect("ILP should be feasible"); + + let extracted = reduction.extract_solution(&ilp_solution); + let value = p.evaluate(&extracted); + assert!( + value.0.is_some(), + "extracted schedule must be valid, got {value:?}" + ); + // Max machine total = max(5, 4, 3) = 5; max job total = max(6, 6) = 6 + // Lower bound = 6. + let makespan = value.0.unwrap(); + assert!(makespan >= 6, "makespan {makespan} must be ≥ lower bound 6"); +} + +// ─── extract_solution ──────────────────────────────────────────────────────── + +#[test] +fn test_openshopscheduling_to_ilp_extract_solution_respects_start_times() { + // For small instance, if we manually craft an ILP solution, extraction should + // order jobs on each machine by start time. + let p = small_instance(); + let reduction: ReductionOSSToILP = ReduceTo::>::reduce_to(&p); + + // Variable layout: x_{0,1,0}=0, x_{0,1,1}=1, s_{0,0}=1, s_{0,1}=0, s_{1,0}=0, s_{1,1}=2, y_{0,0,1}=0, y_{1,0,1}=1, C=3 + // => M1: job 1 starts at 0, job 0 starts at 1 → order [1, 0] + // => M2: job 0 starts at 0, job 1 starts at 2 → order [0, 1] + let target_solution = vec![0, 1, 1, 0, 0, 2, 0, 1, 3]; + let extracted = reduction.extract_solution(&target_solution); + // M1: J1 at t=0, J0 at t=1 → order [1, 0] + // M2: J0 at t=0, J1 at t=2 → order [0, 1] + assert_eq!(extracted[0..2], [1, 0], "M1 order should be [1, 0]"); + assert_eq!(extracted[2..4], [0, 1], "M2 order should be [0, 1]"); + let value = p.evaluate(&extracted); + assert!(value.0.is_some(), "extracted config should be valid"); +} + +// ─── single job / single machine ───────────────────────────────────────────── + +#[test] +fn test_openshopscheduling_to_ilp_single_job() { + // 1 job, 2 machines: trivial, makespan = sum of processing times + let p = OpenShopScheduling::new(2, vec![vec![3, 4]]); + let reduction: ReductionOSSToILP = ReduceTo::>::reduce_to(&p); + let ilp_solution = ILPSolver::new() + .solve(reduction.target_problem()) + .expect("ILP should be feasible"); + let extracted = reduction.extract_solution(&ilp_solution); + let value = p.evaluate(&extracted); + assert!(value.0.is_some()); + assert_eq!(value, Min(Some(7))); +} + +#[test] +fn test_openshopscheduling_to_ilp_single_machine() { + // 3 jobs, 1 machine: serial schedule, makespan = sum of all processing times + let p = OpenShopScheduling::new(1, vec![vec![2], vec![3], vec![1]]); + let reduction: ReductionOSSToILP = ReduceTo::>::reduce_to(&p); + let ilp_solution = ILPSolver::new() + .solve(reduction.target_problem()) + .expect("ILP should be feasible"); + let extracted = reduction.extract_solution(&ilp_solution); + let value = p.evaluate(&extracted); + assert!(value.0.is_some()); + assert_eq!(value, Min(Some(6))); +} From b59c7f47ee23847608107813ec39d4315e813907 Mon Sep 17 00:00:00 2001 From: Xiwei Pan Date: Mon, 30 Mar 2026 07:51:32 +0800 Subject: [PATCH 07/21] feat: add QuadraticCongruences model (#536) Implement the quadratic congruences feasibility problem (GJ AN1) with brute-force solver, CLI support, canonical example, and paper entry. Co-Authored-By: Claude Opus 4.6 (1M context) --- docs/paper/reductions.typ | 46 +++++ problemreductions-cli/src/cli.rs | 7 +- problemreductions-cli/src/commands/create.rs | 31 +++- src/models/algebraic/mod.rs | 4 + src/models/algebraic/quadratic_congruences.rs | 175 ++++++++++++++++++ .../models/algebraic/quadratic_congruences.rs | 154 +++++++++++++++ 6 files changed, 412 insertions(+), 5 deletions(-) create mode 100644 src/models/algebraic/quadratic_congruences.rs create mode 100644 src/unit_tests/models/algebraic/quadratic_congruences.rs diff --git a/docs/paper/reductions.typ b/docs/paper/reductions.typ index 47746047..df64d607 100644 --- a/docs/paper/reductions.typ +++ b/docs/paper/reductions.typ @@ -181,6 +181,7 @@ "PreemptiveScheduling": [Preemptive Scheduling], "PrimeAttributeName": [Prime Attribute Name], "QuadraticAssignment": [Quadratic Assignment], + "QuadraticCongruences": [Quadratic Congruences], "QuadraticDiophantineEquations": [Quadratic Diophantine Equations], "QuantifiedBooleanFormulas": [Quantified Boolean Formulas (QBF)], "RectilinearPictureCompression": [Rectilinear Picture Compression], @@ -3268,6 +3269,51 @@ A classical NP-complete problem from Garey and Johnson @garey1979[Ch.~3, p.~76], ] } +#{ + let x = load-model-example("QuadraticCongruences") + let a = x.instance.a + let b = x.instance.b + let c = x.instance.c + let config = x.optimal_config + let xval = config.at(0) + 1 + // Collect all x in {1..c-1} and check x² mod b == a + let rows = range(1, c).map(xi => { + let sq = xi * xi + let rem = calc.rem(sq, b) + let ok = rem == a + (xi, sq, rem, ok) + }) + [ + #problem-def("QuadraticCongruences")[ + Given non-negative integers $a$, $b$, $c$ with $b > 0$ and $a < b$, determine whether there exists a positive integer $x$ with $1 <= x < c$ such that $x^2 equiv a space (mod space b)$. + ][ + Quadratic Congruences is an NP-complete problem in the setting where $b$ is composite and given in unary (or the factorisation of $b$ is not provided) @garey1979. The problem asks whether $a$ is a _quadratic residue_ modulo $b$ in the range $\{1, dots, c-1\}$. When $b$ is prime, quadratic residuosity can be decided in polynomial time via Euler's criterion or the Legendre symbol, but the general case with composite modulus is believed to be computationally hard without the factorisation of $b$. + + *Example.* Let $a = #a$, $b = #b$, $c = #c$. We test each $x in {1, dots, #(c - 1)}$: + + #pred-commands( + "pred create --example QuadraticCongruences -o qc.json", + "pred solve qc.json --solver brute-force", + "pred evaluate qc.json --config " + config.map(str).join(","), + ) + + #align(center, table( + columns: 4, + align: center, + table.header([$x$], [$x^2$], [$x^2 mod #b$], [Satisfies?]), + ..rows.map(((xi, sq, rem, ok)) => ( + [$#xi$], + [$#sq$], + [$#rem$], + [#if ok [Yes] else [No]], + )).flatten(), + )) + + The instance is satisfiable: $x = #xval$ gives $#xval^2 = #(xval * xval) equiv #a space (mod space #b)$. + ] + ] +} + #{ let x = load-model-example("QuadraticDiophantineEquations") let a = x.instance.a diff --git a/problemreductions-cli/src/cli.rs b/problemreductions-cli/src/cli.rs index fbd0d5ae..94d2d94c 100644 --- a/problemreductions-cli/src/cli.rs +++ b/problemreductions-cli/src/cli.rs @@ -250,6 +250,7 @@ Flags by problem type: SubsetSum --sizes, --target ThreePartition --sizes, --bound KthLargestMTuple --sets, --k, --bound + QuadraticCongruences --coeff-a, --coeff-b, --coeff-c QuadraticDiophantineEquations --coeff-a, --coeff-b, --coeff-c SumOfSquaresPartition --sizes, --num-groups ExpectedRetrievalCost --probabilities, --num-sectors @@ -776,13 +777,13 @@ pub struct CreateArgs { /// Expression tree for IntegerExpressionMembership (JSON, e.g., '{"Sum":[{"Atom":1},{"Atom":2}]}') #[arg(long)] pub expression: Option, - /// Coefficient a for QuadraticDiophantineEquations (coefficient of x²) + /// Coefficient/parameter a for QuadraticCongruences (residue target) or QuadraticDiophantineEquations (coefficient of x²) #[arg(long)] pub coeff_a: Option, - /// Coefficient b for QuadraticDiophantineEquations (coefficient of y) + /// Coefficient/parameter b for QuadraticCongruences (modulus) or QuadraticDiophantineEquations (coefficient of y) #[arg(long)] pub coeff_b: Option, - /// Constant c for QuadraticDiophantineEquations (right-hand side of ax² + by = c) + /// Constant c for QuadraticCongruences (search-space bound) or QuadraticDiophantineEquations (right-hand side of ax² + by = c) #[arg(long)] pub coeff_c: Option, } diff --git a/problemreductions-cli/src/commands/create.rs b/problemreductions-cli/src/commands/create.rs index 070b12cb..11d18df7 100644 --- a/problemreductions-cli/src/commands/create.rs +++ b/problemreductions-cli/src/commands/create.rs @@ -9,8 +9,8 @@ use anyhow::{bail, Context, Result}; use problemreductions::export::{ModelExample, ProblemRef, ProblemSide, RuleExample}; use problemreductions::models::algebraic::{ ClosestVectorProblem, ConsecutiveBlockMinimization, ConsecutiveOnesMatrixAugmentation, - ConsecutiveOnesSubmatrix, FeasibleBasisExtension, QuadraticDiophantineEquations, - SparseMatrixCompression, BMF, + ConsecutiveOnesSubmatrix, FeasibleBasisExtension, QuadraticCongruences, + QuadraticDiophantineEquations, SparseMatrixCompression, BMF, }; use problemreductions::models::formula::Quantifier; use problemreductions::models::graph::{ @@ -754,6 +754,7 @@ fn example_for(canonical: &str, graph_type: Option<&str>) -> &'static str { } "ThreePartition" => "--sizes 4,5,6,4,6,5 --bound 15", "KthLargestMTuple" => "--sets \"2,5,8;3,6;1,4,7\" --k 14 --bound 12", + "QuadraticCongruences" => "--coeff-a 4 --coeff-b 15 --coeff-c 10", "QuadraticDiophantineEquations" => "--coeff-a 3 --coeff-b 5 --coeff-c 53", "BoyceCoddNormalFormViolation" => { "--n 6 --sets \"0,1:2;2:3;3,4:5\" --target 0,1,2,3,4,5" @@ -2514,6 +2515,32 @@ pub fn create(args: &CreateArgs, out: &OutputConfig) -> Result<()> { ) } + // QuadraticCongruences + "QuadraticCongruences" => { + let a = args.coeff_a.ok_or_else(|| { + anyhow::anyhow!( + "QuadraticCongruences requires --coeff-a, --coeff-b, and --coeff-c\n\n\ + Usage: pred create QuadraticCongruences --coeff-a 4 --coeff-b 15 --coeff-c 10" + ) + })?; + let b = args.coeff_b.ok_or_else(|| { + anyhow::anyhow!( + "QuadraticCongruences requires --coeff-b\n\n\ + Usage: pred create QuadraticCongruences --coeff-a 4 --coeff-b 15 --coeff-c 10" + ) + })?; + let c = args.coeff_c.ok_or_else(|| { + anyhow::anyhow!( + "QuadraticCongruences requires --coeff-c\n\n\ + Usage: pred create QuadraticCongruences --coeff-a 4 --coeff-b 15 --coeff-c 10" + ) + })?; + ( + ser(QuadraticCongruences::try_new(a, b, c).map_err(anyhow::Error::msg)?)?, + resolved_variant.clone(), + ) + } + // QuadraticDiophantineEquations "QuadraticDiophantineEquations" => { let a = args.coeff_a.ok_or_else(|| { diff --git a/src/models/algebraic/mod.rs b/src/models/algebraic/mod.rs index c3694736..f52f471f 100644 --- a/src/models/algebraic/mod.rs +++ b/src/models/algebraic/mod.rs @@ -8,6 +8,7 @@ //! - [`ConsecutiveBlockMinimization`]: Consecutive Block Minimization //! - [`ConsecutiveOnesSubmatrix`]: Consecutive Ones Submatrix (column selection with C1P) //! - [`QuadraticAssignment`]: Quadratic Assignment Problem +//! - [`QuadraticCongruences`]: Decide x² ≡ a (mod b) for x in {1, ..., c-1} //! - [`QuadraticDiophantineEquations`]: Decide ax² + by = c in positive integers //! - [`SparseMatrixCompression`]: Sparse Matrix Compression by row overlay @@ -19,6 +20,7 @@ pub(crate) mod consecutive_ones_submatrix; pub(crate) mod feasible_basis_extension; pub(crate) mod ilp; pub(crate) mod quadratic_assignment; +pub(crate) mod quadratic_congruences; pub(crate) mod quadratic_diophantine_equations; pub(crate) mod qubo; pub(crate) mod sparse_matrix_compression; @@ -31,6 +33,7 @@ pub use consecutive_ones_submatrix::ConsecutiveOnesSubmatrix; pub use feasible_basis_extension::FeasibleBasisExtension; pub use ilp::{Comparison, LinearConstraint, ObjectiveSense, VariableDomain, ILP}; pub use quadratic_assignment::QuadraticAssignment; +pub use quadratic_congruences::QuadraticCongruences; pub use quadratic_diophantine_equations::QuadraticDiophantineEquations; pub use qubo::QUBO; pub use sparse_matrix_compression::SparseMatrixCompression; @@ -47,6 +50,7 @@ pub(crate) fn canonical_model_example_specs() -> Vec 0 and a < b, determine whether +//! there exists a positive integer x with 1 ≤ x < c such that x² ≡ a (mod b). + +use crate::registry::{FieldInfo, ProblemSchemaEntry, ProblemSizeFieldEntry}; +use crate::traits::Problem; +use crate::types::Or; +use serde::de::Error as _; +use serde::{Deserialize, Deserializer, Serialize}; + +inventory::submit! { + ProblemSchemaEntry { + name: "QuadraticCongruences", + display_name: "Quadratic Congruences", + aliases: &[], + dimensions: &[], + module_path: module_path!(), + description: "Decide whether x² ≡ a (mod b) has a solution for x in {1, ..., c-1}", + fields: &[ + FieldInfo { name: "a", type_name: "u64", description: "a" }, + FieldInfo { name: "b", type_name: "u64", description: "b" }, + FieldInfo { name: "c", type_name: "u64", description: "c" }, + ], + } +} + +inventory::submit! { + ProblemSizeFieldEntry { + name: "QuadraticCongruences", + fields: &["c"], + } +} + +/// Quadratic Congruences problem. +/// +/// Given non-negative integers a, b, c with b > 0 and a < b, determine whether +/// there exists a positive integer x with 1 ≤ x < c such that x² ≡ a (mod b). +/// +/// The search space is x ∈ {1, …, c−1}. The configuration variable `config[0]` +/// encodes x as `x = config[0] + 1`. +/// +/// # Example +/// +/// ``` +/// use problemreductions::models::algebraic::QuadraticCongruences; +/// use problemreductions::{Problem, Solver, BruteForce}; +/// +/// // a=4, b=15, c=10: x=2 → 4 mod 15 = 4 ✓ +/// let problem = QuadraticCongruences::new(4, 15, 10); +/// let solver = BruteForce::new(); +/// let witness = solver.find_witness(&problem); +/// assert!(witness.is_some()); +/// ``` +#[derive(Debug, Clone, Serialize)] +pub struct QuadraticCongruences { + /// Quadratic residue target. + a: u64, + /// Modulus. + b: u64, + /// Search-space bound; x ranges over {1, ..., c-1}. + c: u64, +} + +impl QuadraticCongruences { + fn validate_inputs(a: u64, b: u64, c: u64) -> Result<(), String> { + if b == 0 { + return Err("Modulus b must be positive".to_string()); + } + if c == 0 { + return Err("Bound c must be positive".to_string()); + } + if a >= b { + return Err(format!("Residue a ({a}) must be less than modulus b ({b})")); + } + Ok(()) + } + + /// Create a new QuadraticCongruences instance, returning an error instead of + /// panicking when the inputs are invalid. + pub fn try_new(a: u64, b: u64, c: u64) -> Result { + Self::validate_inputs(a, b, c)?; + Ok(Self { a, b, c }) + } + + /// Create a new QuadraticCongruences instance. + /// + /// # Panics + /// + /// Panics if `b == 0`, `c == 0`, or `a >= b`. + pub fn new(a: u64, b: u64, c: u64) -> Self { + Self::try_new(a, b, c).unwrap_or_else(|msg| panic!("{msg}")) + } + + /// Get the quadratic residue target a. + pub fn a(&self) -> u64 { + self.a + } + + /// Get the modulus b. + pub fn b(&self) -> u64 { + self.b + } + + /// Get the search-space bound c. + pub fn c(&self) -> u64 { + self.c + } +} + +#[derive(Deserialize)] +struct QuadraticCongruencesData { + a: u64, + b: u64, + c: u64, +} + +impl<'de> Deserialize<'de> for QuadraticCongruences { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let data = QuadraticCongruencesData::deserialize(deserializer)?; + Self::try_new(data.a, data.b, data.c).map_err(D::Error::custom) + } +} + +impl Problem for QuadraticCongruences { + const NAME: &'static str = "QuadraticCongruences"; + type Value = Or; + + fn variant() -> Vec<(&'static str, &'static str)> { + crate::variant_params![] + } + + fn dims(&self) -> Vec { + if self.c <= 1 { + // No x in {1, ..., c-1} exists. + return vec![]; + } + // config[0] ∈ {0, ..., c-2} maps to x = config[0] + 1 ∈ {1, ..., c-1}. + vec![self.c as usize - 1] + } + + fn evaluate(&self, config: &[usize]) -> Or { + if self.c <= 1 { + return Or(false); + } + if config.len() != 1 { + return Or(false); + } + let x = (config[0] as u64) + 1; // 1-indexed + let satisfies = ((x as u128) * (x as u128)) % (self.b as u128) == (self.a as u128); + Or(satisfies) + } +} + +crate::declare_variants! { + default QuadraticCongruences => "c", +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_model_example_specs() -> Vec { + vec![crate::example_db::specs::ModelExampleSpec { + id: "quadratic_congruences", + instance: Box::new(QuadraticCongruences::new(4, 15, 10)), + // x=2 (config[0]=1): 2²=4 ≡ 4 (mod 15) ✓ + optimal_config: vec![1], + optimal_value: serde_json::json!(true), + }] +} + +#[cfg(test)] +#[path = "../../unit_tests/models/algebraic/quadratic_congruences.rs"] +mod tests; diff --git a/src/unit_tests/models/algebraic/quadratic_congruences.rs b/src/unit_tests/models/algebraic/quadratic_congruences.rs new file mode 100644 index 00000000..c8595297 --- /dev/null +++ b/src/unit_tests/models/algebraic/quadratic_congruences.rs @@ -0,0 +1,154 @@ +use crate::models::algebraic::QuadraticCongruences; +use crate::solvers::BruteForce; +use crate::traits::Problem; +use crate::types::Or; + +fn yes_problem() -> QuadraticCongruences { + // a=4, b=15, c=10: x=2 → 4 mod 15 = 4 ✓; x=7 → 49 mod 15 = 4 ✓; x=8 → 64 mod 15 = 4 ✓ + QuadraticCongruences::new(4, 15, 10) +} + +fn no_problem() -> QuadraticCongruences { + // a=3, b=7, c=7: no x in {1..6} satisfies x² ≡ 3 (mod 7) (QRs mod 7 are {0,1,2,4}) + QuadraticCongruences::new(3, 7, 7) +} + +#[test] +fn test_quadratic_congruences_creation_and_accessors() { + let p = yes_problem(); + assert_eq!(p.a(), 4); + assert_eq!(p.b(), 15); + assert_eq!(p.c(), 10); + // config[0] ∈ {0..8} → x ∈ {1..9}: dims = [9] + assert_eq!(p.dims(), vec![9]); + assert_eq!(p.num_variables(), 1); + assert_eq!( + ::NAME, + "QuadraticCongruences" + ); + assert_eq!(::variant(), vec![]); +} + +#[test] +fn test_quadratic_congruences_evaluate_yes() { + let p = yes_problem(); + // x=2 (config[0]=1): 4 mod 15 = 4 ✓ + assert_eq!(p.evaluate(&[1]), Or(true)); + // x=7 (config[0]=6): 49 mod 15 = 4 ✓ + assert_eq!(p.evaluate(&[6]), Or(true)); + // x=8 (config[0]=7): 64 mod 15 = 4 ✓ + assert_eq!(p.evaluate(&[7]), Or(true)); + // x=1 (config[0]=0): 1 mod 15 = 1 ≠ 4 + assert_eq!(p.evaluate(&[0]), Or(false)); + // x=3 (config[0]=2): 9 mod 15 = 9 ≠ 4 + assert_eq!(p.evaluate(&[2]), Or(false)); +} + +#[test] +fn test_quadratic_congruences_evaluate_no() { + let p = no_problem(); + // dims = [6]: x ∈ {1..6} + assert_eq!(p.dims(), vec![6]); + for cfg in 0..6 { + // quadratic residues mod 7 are {0,1,2,4}; 3 is not one + assert_eq!(p.evaluate(&[cfg]), Or(false)); + } +} + +#[test] +fn test_quadratic_congruences_evaluate_invalid_config() { + let p = yes_problem(); + // Wrong number of variables + assert_eq!(p.evaluate(&[]), Or(false)); + assert_eq!(p.evaluate(&[0, 1]), Or(false)); +} + +#[test] +fn test_quadratic_congruences_c_le_1() { + // c=1: search space {1..0} is empty + let p = QuadraticCongruences::new(0, 5, 1); + assert_eq!(p.dims(), Vec::::new()); + assert_eq!(p.evaluate(&[0]), Or(false)); + assert_eq!(p.evaluate(&[]), Or(false)); +} + +#[test] +fn test_quadratic_congruences_brute_force_finds_witness() { + let solver = BruteForce::new(); + let witness = solver.find_witness(&yes_problem()).unwrap(); + assert_eq!(yes_problem().evaluate(&witness), Or(true)); +} + +#[test] +fn test_quadratic_congruences_brute_force_finds_all_witnesses() { + let solver = BruteForce::new(); + let all = solver.find_all_witnesses(&yes_problem()); + // x=2 (cfg=1), x=7 (cfg=6), x=8 (cfg=7) + assert_eq!(all.len(), 3); + assert!(all + .iter() + .all(|sol| yes_problem().evaluate(sol) == Or(true))); +} + +#[test] +fn test_quadratic_congruences_brute_force_no_witness() { + let solver = BruteForce::new(); + assert!(solver.find_witness(&no_problem()).is_none()); +} + +#[test] +fn test_quadratic_congruences_serialization() { + let p = yes_problem(); + let json = serde_json::to_value(&p).unwrap(); + assert_eq!(json, serde_json::json!({"a": 4, "b": 15, "c": 10})); + + let restored: QuadraticCongruences = serde_json::from_value(json).unwrap(); + assert_eq!(restored.a(), p.a()); + assert_eq!(restored.b(), p.b()); + assert_eq!(restored.c(), p.c()); +} + +#[test] +fn test_quadratic_congruences_deserialization_rejects_invalid() { + // b=0 + let r: Result = + serde_json::from_value(serde_json::json!({"a": 0, "b": 0, "c": 5})); + assert!(r.is_err()); + // c=0 + let r: Result = + serde_json::from_value(serde_json::json!({"a": 0, "b": 5, "c": 0})); + assert!(r.is_err()); + // a >= b + let r: Result = + serde_json::from_value(serde_json::json!({"a": 7, "b": 5, "c": 10})); + assert!(r.is_err()); +} + +#[test] +fn test_quadratic_congruences_paper_example() { + // Canonical example: a=4, b=15, c=10; optimal config [1] (x=2) + let p = QuadraticCongruences::new(4, 15, 10); + assert_eq!(p.evaluate(&[1]), Or(true)); + + let solver = BruteForce::new(); + let witness = solver.find_witness(&p).unwrap(); + assert_eq!(p.evaluate(&witness), Or(true)); +} + +#[test] +#[should_panic(expected = "Modulus b must be positive")] +fn test_quadratic_congruences_panics_on_zero_b() { + QuadraticCongruences::new(0, 0, 5); +} + +#[test] +#[should_panic(expected = "Bound c must be positive")] +fn test_quadratic_congruences_panics_on_zero_c() { + QuadraticCongruences::new(0, 5, 0); +} + +#[test] +#[should_panic(expected = "Residue a")] +fn test_quadratic_congruences_panics_on_a_ge_b() { + QuadraticCongruences::new(5, 5, 10); +} From 6f87f403acdf47c965b6f572bbdd269de125a17e Mon Sep 17 00:00:00 2001 From: Xiwei Pan Date: Mon, 30 Mar 2026 08:13:46 +0800 Subject: [PATCH 08/21] feat: add SimultaneousIncongruences model (#537) Implement the simultaneous incongruences feasibility problem (GJ AN2) with brute-force solver, CLI support, canonical example, and paper entry. Co-Authored-By: Claude Opus 4.6 (1M context) --- docs/paper/reductions.typ | 60 ++++++ problemreductions-cli/src/cli.rs | 4 + problemreductions-cli/src/commands/create.rs | 39 +++- src/models/algebraic/mod.rs | 4 + .../algebraic/simultaneous_incongruences.rs | 192 ++++++++++++++++++ src/models/mod.rs | 3 +- .../algebraic/simultaneous_incongruences.rs | 128 ++++++++++++ 7 files changed, 428 insertions(+), 2 deletions(-) create mode 100644 src/models/algebraic/simultaneous_incongruences.rs create mode 100644 src/unit_tests/models/algebraic/simultaneous_incongruences.rs diff --git a/docs/paper/reductions.typ b/docs/paper/reductions.typ index df64d607..10ac82c7 100644 --- a/docs/paper/reductions.typ +++ b/docs/paper/reductions.typ @@ -183,6 +183,7 @@ "QuadraticAssignment": [Quadratic Assignment], "QuadraticCongruences": [Quadratic Congruences], "QuadraticDiophantineEquations": [Quadratic Diophantine Equations], + "SimultaneousIncongruences": [Simultaneous Incongruences], "QuantifiedBooleanFormulas": [Quantified Boolean Formulas (QBF)], "RectilinearPictureCompression": [Rectilinear Picture Compression], "RegisterSufficiency": [Register Sufficiency], @@ -3361,6 +3362,65 @@ A classical NP-complete problem from Garey and Johnson @garey1979[Ch.~3, p.~76], ] } +#{ + let x = load-model-example("SimultaneousIncongruences") + let pairs = x.instance.pairs + let config = x.optimal_config + let xval = config.at(0) + // Build table rows: for each pair (a, b), compute x mod b and check ≠ a mod b + let rows = pairs.map(pair => { + let a = pair.at(0) + let b = pair.at(1) + let r = calc.rem(xval, b) + let cond = calc.rem(a, b) + let ok = r != cond + (a, b, r, cond, ok) + }) + let moduli = pairs.map(p => p.at(1)) + // lcm(2,3,5,7) = 210 for the canonical example + let lcm-val = moduli.fold(1, (l, b) => l * b / { + // gcd via Euclidean algorithm (unrolled for small values) + let aa = l + let bb = b + while bb != 0 { + let tmp = calc.rem(aa, bb) + aa = bb + bb = tmp + } + aa + }) + [ + #problem-def("SimultaneousIncongruences")[ + Given a list of pairs $(a_i, b_i)$ with $b_i > 0$ and $1 <= a_i <= b_i$ for $i = 1, dots, n$, determine whether there exists a non-negative integer $x$ such that $x equiv.not a_i space (mod space b_i)$ for all $i$. + ][ + Simultaneous Incongruences is an NP-complete problem @garey1979. It asks whether the complement of a system of congruences — a _covering system_ — can be simultaneously avoided. A _covering system_ is a finite collection of congruences $\{a_i space (op("mod") space b_i)\}$ that covers every integer; when the system is a covering system there is no valid $x$ and the instance is a "no" instance. The problem generalises checking whether a given set of congruences is a covering system, which has connections to Erdős's covering conjecture and sieve methods in analytic number theory. + + *Example.* Let $n = #pairs.len()$ with pairs #pairs.map(p => $(#p.at(0), #p.at(1))$).join(", "). The full period is $L = op("lcm")(#moduli.map(str).join(", ")) = #lcm-val$. We test $x = #xval$: + + #pred-commands( + "pred create --example SimultaneousIncongruences -o si.json", + "pred solve si.json --solver brute-force", + "pred evaluate si.json --config " + config.map(str).join(","), + ) + + #align(center, table( + columns: 5, + align: center, + table.header([$a_i$], [$b_i$], [$#xval mod b_i$], [$a_i mod b_i$], [Avoids?]), + ..rows.map(((a, b, r, cond, ok)) => ( + [$#a$], + [$#b$], + [$#r$], + [$#cond$], + [#if ok [Yes] else [No]], + )).flatten(), + )) + + The instance is satisfiable: $x = #xval$ avoids all congruences. + ] + ] +} + #{ let x = load-model-example("ClosestVectorProblem") let basis = x.instance.basis diff --git a/problemreductions-cli/src/cli.rs b/problemreductions-cli/src/cli.rs index 94d2d94c..664c4654 100644 --- a/problemreductions-cli/src/cli.rs +++ b/problemreductions-cli/src/cli.rs @@ -252,6 +252,7 @@ Flags by problem type: KthLargestMTuple --sets, --k, --bound QuadraticCongruences --coeff-a, --coeff-b, --coeff-c QuadraticDiophantineEquations --coeff-a, --coeff-b, --coeff-c + SimultaneousIncongruences --pairs (semicolon-separated a,b pairs) SumOfSquaresPartition --sizes, --num-groups ExpectedRetrievalCost --probabilities, --num-sectors PaintShop --sequence @@ -786,6 +787,9 @@ pub struct CreateArgs { /// Constant c for QuadraticCongruences (search-space bound) or QuadraticDiophantineEquations (right-hand side of ax² + by = c) #[arg(long)] pub coeff_c: Option, + /// Incongruence pairs for SimultaneousIncongruences (semicolon-separated "a,b" pairs, e.g., "2,2;1,3;2,5;3,7") + #[arg(long)] + pub pairs: Option, } #[derive(clap::Args)] diff --git a/problemreductions-cli/src/commands/create.rs b/problemreductions-cli/src/commands/create.rs index 11d18df7..14d86376 100644 --- a/problemreductions-cli/src/commands/create.rs +++ b/problemreductions-cli/src/commands/create.rs @@ -10,7 +10,7 @@ use problemreductions::export::{ModelExample, ProblemRef, ProblemSide, RuleExamp use problemreductions::models::algebraic::{ ClosestVectorProblem, ConsecutiveBlockMinimization, ConsecutiveOnesMatrixAugmentation, ConsecutiveOnesSubmatrix, FeasibleBasisExtension, QuadraticCongruences, - QuadraticDiophantineEquations, SparseMatrixCompression, BMF, + QuadraticDiophantineEquations, SimultaneousIncongruences, SparseMatrixCompression, BMF, }; use problemreductions::models::formula::Quantifier; use problemreductions::models::graph::{ @@ -205,6 +205,7 @@ fn all_data_flags_empty(args: &CreateArgs) -> bool { && args.coeff_b.is_none() && args.rhs.is_none() && args.coeff_c.is_none() + && args.pairs.is_none() && args.required_columns.is_none() && args.compilers.is_none() && args.setup_times.is_none() @@ -756,6 +757,7 @@ fn example_for(canonical: &str, graph_type: Option<&str>) -> &'static str { "KthLargestMTuple" => "--sets \"2,5,8;3,6;1,4,7\" --k 14 --bound 12", "QuadraticCongruences" => "--coeff-a 4 --coeff-b 15 --coeff-c 10", "QuadraticDiophantineEquations" => "--coeff-a 3 --coeff-b 5 --coeff-c 53", + "SimultaneousIncongruences" => "--pairs \"2,2;1,3;2,5;3,7\"", "BoyceCoddNormalFormViolation" => { "--n 6 --sets \"0,1:2;2:3;3,4:5\" --target 0,1,2,3,4,5" } @@ -2567,6 +2569,40 @@ pub fn create(args: &CreateArgs, out: &OutputConfig) -> Result<()> { ) } + // SimultaneousIncongruences + "SimultaneousIncongruences" => { + let pairs_str = args.pairs.as_deref().ok_or_else(|| { + anyhow::anyhow!( + "SimultaneousIncongruences requires --pairs\n\n\ + Usage: pred create SimultaneousIncongruences --pairs \"2,2;1,3;2,5;3,7\"" + ) + })?; + let pairs: Vec<(u64, u64)> = pairs_str + .split(';') + .map(|s| { + let parts: Vec<&str> = s.split(',').collect(); + if parts.len() != 2 { + return Err(anyhow::anyhow!( + "Each pair must be in \"a,b\" format, got: {s}" + )); + } + let a: u64 = parts[0] + .trim() + .parse() + .with_context(|| format!("Invalid integer in pair: {s}"))?; + let b: u64 = parts[1] + .trim() + .parse() + .with_context(|| format!("Invalid integer in pair: {s}"))?; + Ok((a, b)) + }) + .collect::>()?; + ( + ser(SimultaneousIncongruences::new(pairs).map_err(anyhow::Error::msg)?)?, + resolved_variant.clone(), + ) + } + // SumOfSquaresPartition "SumOfSquaresPartition" => { let sizes_str = args.sizes.as_deref().ok_or_else(|| { @@ -8214,6 +8250,7 @@ mod tests { coeff_b: None, rhs: None, coeff_c: None, + pairs: None, required_columns: None, compilers: None, setup_times: None, diff --git a/src/models/algebraic/mod.rs b/src/models/algebraic/mod.rs index f52f471f..9101e918 100644 --- a/src/models/algebraic/mod.rs +++ b/src/models/algebraic/mod.rs @@ -10,6 +10,7 @@ //! - [`QuadraticAssignment`]: Quadratic Assignment Problem //! - [`QuadraticCongruences`]: Decide x² ≡ a (mod b) for x in {1, ..., c-1} //! - [`QuadraticDiophantineEquations`]: Decide ax² + by = c in positive integers +//! - [`SimultaneousIncongruences`]: Decide whether x ≢ aᵢ (mod bᵢ) for all i simultaneously //! - [`SparseMatrixCompression`]: Sparse Matrix Compression by row overlay pub(crate) mod bmf; @@ -23,6 +24,7 @@ pub(crate) mod quadratic_assignment; pub(crate) mod quadratic_congruences; pub(crate) mod quadratic_diophantine_equations; pub(crate) mod qubo; +pub(crate) mod simultaneous_incongruences; pub(crate) mod sparse_matrix_compression; pub use bmf::BMF; @@ -36,6 +38,7 @@ pub use quadratic_assignment::QuadraticAssignment; pub use quadratic_congruences::QuadraticCongruences; pub use quadratic_diophantine_equations::QuadraticDiophantineEquations; pub use qubo::QUBO; +pub use simultaneous_incongruences::SimultaneousIncongruences; pub use sparse_matrix_compression::SparseMatrixCompression; #[cfg(feature = "example-db")] @@ -52,6 +55,7 @@ pub(crate) fn canonical_model_example_specs() -> Vec 0 and 1 ≤ aᵢ ≤ bᵢ, determine whether +//! there exists a non-negative integer x such that x ≢ aᵢ (mod bᵢ) for all i. + +use crate::registry::{FieldInfo, ProblemSchemaEntry, ProblemSizeFieldEntry}; +use crate::traits::Problem; +use crate::types::Or; +use serde::de::Error as _; +use serde::{Deserialize, Deserializer, Serialize}; + +inventory::submit! { + ProblemSchemaEntry { + name: "SimultaneousIncongruences", + display_name: "Simultaneous Incongruences", + aliases: &[], + dimensions: &[], + module_path: module_path!(), + description: "Decide whether there exists x with x ≢ aᵢ (mod bᵢ) for all i", + fields: &[ + FieldInfo { + name: "pairs", + type_name: "Vec<(u64, u64)>", + description: "Pairs (aᵢ, bᵢ) with bᵢ > 0 and 1 ≤ aᵢ ≤ bᵢ", + }, + ], + } +} + +inventory::submit! { + ProblemSizeFieldEntry { + name: "SimultaneousIncongruences", + fields: &["num_pairs"], + } +} + +/// Simultaneous Incongruences problem. +/// +/// Given a list of pairs (aᵢ, bᵢ) with bᵢ > 0 and 1 ≤ aᵢ ≤ bᵢ, determine whether +/// there exists a non-negative integer x such that x ≢ aᵢ (mod bᵢ) for all i simultaneously. +/// +/// The search space is x ∈ {0, …, L−1} where L = lcm(b₁, …, bₙ) (one full period). +/// `config[0]` encodes x directly. +/// +/// # Example +/// +/// ``` +/// use problemreductions::models::algebraic::SimultaneousIncongruences; +/// use problemreductions::{Problem, Solver, BruteForce}; +/// +/// // pairs: [(2,2),(1,3),(2,5),(3,7)] — lcm=210, x=5 is a solution +/// let problem = SimultaneousIncongruences::new(vec![(2,2),(1,3),(2,5),(3,7)]).unwrap(); +/// let solver = BruteForce::new(); +/// let witness = solver.find_witness(&problem); +/// assert!(witness.is_some()); +/// ``` +#[derive(Debug, Clone, Serialize)] +pub struct SimultaneousIncongruences { + /// Incongruence pairs (aᵢ, bᵢ). + pairs: Vec<(u64, u64)>, +} + +/// Maximum lcm value we will compute in full; if the lcm exceeds this cap we +/// return this value to keep the brute-force search space manageable. +const MAX_LCM: u128 = 1_000_000; + +fn lcm128(a: u128, b: u128) -> u128 { + if a == 0 || b == 0 { + return 0; + } + let g = gcd128(a, b); + // Use saturating arithmetic to avoid overflow; cap at MAX_LCM. + (a / g).saturating_mul(b).min(MAX_LCM) +} + +fn gcd128(mut a: u128, mut b: u128) -> u128 { + while b != 0 { + let t = b; + b = a % b; + a = t; + } + a +} + +impl SimultaneousIncongruences { + fn validate_inputs(pairs: &[(u64, u64)]) -> Result<(), String> { + for (i, &(a, b)) in pairs.iter().enumerate() { + if b == 0 { + return Err(format!("Modulus b at index {i} must be positive (got b=0)")); + } + if a == 0 { + return Err(format!( + "Residue a at index {i} must be at least 1 (got a=0)" + )); + } + if a > b { + return Err(format!( + "Residue a ({a}) must not exceed modulus b ({b}) at index {i}" + )); + } + } + Ok(()) + } + + /// Create a new `SimultaneousIncongruences` instance, returning an error + /// if any pair is invalid. + pub fn new(pairs: Vec<(u64, u64)>) -> Result { + Self::validate_inputs(&pairs)?; + Ok(Self { pairs }) + } + + /// Get the number of incongruence pairs. + pub fn num_pairs(&self) -> usize { + self.pairs.len() + } + + /// Get the incongruence pairs. + pub fn pairs(&self) -> &[(u64, u64)] { + &self.pairs + } + + /// Compute the LCM of all moduli (capped at `MAX_LCM`). + pub fn lcm_moduli(&self) -> u64 { + if self.pairs.is_empty() { + return 1; + } + let lcm = self + .pairs + .iter() + .fold(1u128, |acc, &(_, b)| lcm128(acc, b as u128)); + lcm as u64 + } +} + +#[derive(Deserialize)] +struct SimultaneousIncongruencesData { + pairs: Vec<(u64, u64)>, +} + +impl<'de> Deserialize<'de> for SimultaneousIncongruences { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let data = SimultaneousIncongruencesData::deserialize(deserializer)?; + Self::new(data.pairs).map_err(D::Error::custom) + } +} + +impl Problem for SimultaneousIncongruences { + const NAME: &'static str = "SimultaneousIncongruences"; + type Value = Or; + + fn variant() -> Vec<(&'static str, &'static str)> { + crate::variant_params![] + } + + fn dims(&self) -> Vec { + let lcm = self.lcm_moduli() as usize; + vec![lcm] + } + + fn evaluate(&self, config: &[usize]) -> Or { + if config.len() != 1 { + return Or(false); + } + let x = config[0] as u64; + // x is a solution iff x % bᵢ ≠ aᵢ % bᵢ for every pair. + Or(self.pairs.iter().all(|&(a, b)| x % b != a % b)) + } +} + +crate::declare_variants! { + default SimultaneousIncongruences => "num_pairs", +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_model_example_specs() -> Vec { + vec![crate::example_db::specs::ModelExampleSpec { + id: "simultaneous_incongruences", + instance: Box::new( + SimultaneousIncongruences::new(vec![(2, 2), (1, 3), (2, 5), (3, 7)]).unwrap(), + ), + // x=5: 5%2=1≠0(=2%2), 5%3=2≠1, 5%5=0≠2, 5%7=5≠3 ✓ + optimal_config: vec![5], + optimal_value: serde_json::json!(true), + }] +} + +#[cfg(test)] +#[path = "../../unit_tests/models/algebraic/simultaneous_incongruences.rs"] +mod tests; diff --git a/src/models/mod.rs b/src/models/mod.rs index 82d7a06e..826d40fe 100644 --- a/src/models/mod.rs +++ b/src/models/mod.rs @@ -12,7 +12,8 @@ pub mod set; pub use algebraic::{ ClosestVectorProblem, ConsecutiveBlockMinimization, ConsecutiveOnesMatrixAugmentation, ConsecutiveOnesSubmatrix, FeasibleBasisExtension, QuadraticAssignment, - QuadraticDiophantineEquations, SparseMatrixCompression, BMF, ILP, QUBO, + QuadraticDiophantineEquations, SimultaneousIncongruences, SparseMatrixCompression, BMF, ILP, + QUBO, }; pub use formula::{ CNFClause, CircuitSAT, KSatisfiability, NAESatisfiability, QuantifiedBooleanFormulas, diff --git a/src/unit_tests/models/algebraic/simultaneous_incongruences.rs b/src/unit_tests/models/algebraic/simultaneous_incongruences.rs new file mode 100644 index 00000000..db78bf38 --- /dev/null +++ b/src/unit_tests/models/algebraic/simultaneous_incongruences.rs @@ -0,0 +1,128 @@ +use crate::models::algebraic::SimultaneousIncongruences; +use crate::solvers::BruteForce; +use crate::traits::Problem; +use crate::types::Or; + +fn example_problem() -> SimultaneousIncongruences { + // pairs: [(2,2),(1,3),(2,5),(3,7)] — lcm=210, x=5 is a solution + SimultaneousIncongruences::new(vec![(2, 2), (1, 3), (2, 5), (3, 7)]).unwrap() +} + +fn covering_system() -> SimultaneousIncongruences { + // Erdős covering system: {0 mod 2, 0 mod 3, 1 mod 4, 5 mod 6, 7 mod 12} + // This covers all integers — note this uses 0-based residues but our + // constructor requires 1 ≤ aᵢ ≤ bᵢ, so we use aᵢ=bᵢ to represent ≡0. + // A simpler unsatisfiable instance: two pairs that together cover everything. + // x≢2 (mod 2) means x odd, and x≢1 (mod 2) means x even — these together + // leave no valid x. + SimultaneousIncongruences::new(vec![(2, 2), (1, 2)]).unwrap() +} + +#[test] +fn test_simultaneous_incongruences_creation_and_accessors() { + let p = example_problem(); + assert_eq!(p.num_pairs(), 4); + assert_eq!(p.pairs(), &[(2, 2), (1, 3), (2, 5), (3, 7)]); + // lcm(2,3,5,7) = 210 + assert_eq!(p.lcm_moduli(), 210); + assert_eq!(p.dims(), vec![210]); + assert_eq!(p.num_variables(), 1); + assert_eq!( + ::NAME, + "SimultaneousIncongruences" + ); + assert_eq!(::variant(), vec![]); +} + +#[test] +fn test_simultaneous_incongruences_evaluate_yes() { + let p = example_problem(); + // x=5: 5%2=1≠0(=2%2), 5%3=2≠1, 5%5=0≠2, 5%7=5≠3 ✓ + assert_eq!(p.evaluate(&[5]), Or(true)); + // x=1: 1%2=1≠0(=2%2), 1%3=1=1 — fails for pair (1,3) + assert_eq!(p.evaluate(&[1]), Or(false)); +} + +#[test] +fn test_simultaneous_incongruences_evaluate_no() { + let p = covering_system(); + // pairs (2,2) and (1,2): together require x≡0 (mod 2) AND x≡1 (mod 2), + // which is impossible. + let lcm = p.lcm_moduli(); + assert_eq!(lcm, 2); + // All x in {0,1} should fail + for x in 0..lcm as usize { + assert_eq!(p.evaluate(&[x]), Or(false), "expected false for x={x}"); + } +} + +#[test] +fn test_simultaneous_incongruences_evaluate_invalid_config() { + let p = example_problem(); + assert_eq!(p.evaluate(&[]), Or(false)); + assert_eq!(p.evaluate(&[0, 1]), Or(false)); +} + +#[test] +fn test_simultaneous_incongruences_empty_pairs() { + let p = SimultaneousIncongruences::new(vec![]).unwrap(); + assert_eq!(p.num_pairs(), 0); + assert_eq!(p.lcm_moduli(), 1); + assert_eq!(p.dims(), vec![1]); + // Any x (here x=0) satisfies vacuously + assert_eq!(p.evaluate(&[0]), Or(true)); +} + +#[test] +fn test_simultaneous_incongruences_brute_force_finds_witness() { + let p = example_problem(); + let solver = BruteForce::new(); + let witness = solver.find_witness(&p).unwrap(); + assert_eq!(p.evaluate(&witness), Or(true)); +} + +#[test] +fn test_simultaneous_incongruences_brute_force_no_witness() { + let p = covering_system(); + let solver = BruteForce::new(); + assert!(solver.find_witness(&p).is_none()); +} + +#[test] +fn test_simultaneous_incongruences_serialization() { + let p = example_problem(); + let json = serde_json::to_value(&p).unwrap(); + assert_eq!( + json, + serde_json::json!({"pairs": [[2,2],[1,3],[2,5],[3,7]]}) + ); + let restored: SimultaneousIncongruences = serde_json::from_value(json).unwrap(); + assert_eq!(restored.pairs(), p.pairs()); +} + +#[test] +fn test_simultaneous_incongruences_deserialization_rejects_invalid() { + // b=0 + let r: Result = + serde_json::from_value(serde_json::json!({"pairs": [[1,0]]})); + assert!(r.is_err()); + // a=0 + let r: Result = + serde_json::from_value(serde_json::json!({"pairs": [[0,2]]})); + assert!(r.is_err()); + // a > b + let r: Result = + serde_json::from_value(serde_json::json!({"pairs": [[3,2]]})); + assert!(r.is_err()); +} + +#[test] +fn test_simultaneous_incongruences_paper_example() { + // Canonical paper example: pairs [(2,2),(1,3),(2,5),(3,7)], x=5 is a solution + let p = SimultaneousIncongruences::new(vec![(2, 2), (1, 3), (2, 5), (3, 7)]).unwrap(); + assert_eq!(p.evaluate(&[5]), Or(true)); + + let solver = BruteForce::new(); + let witness = solver.find_witness(&p).unwrap(); + assert_eq!(p.evaluate(&witness), Or(true)); +} From e538decc6f65bcfc6a160ed69ecdc162ce2c9171 Mon Sep 17 00:00:00 2001 From: Xiwei Pan Date: Mon, 30 Mar 2026 08:32:37 +0800 Subject: [PATCH 09/21] feat: add EquilibriumPoint model (#549) Implement the discrete Nash equilibrium existence problem (GJ AN15) with brute-force solver, CLI support, canonical example, and paper entry. Co-Authored-By: Claude Opus 4.6 (1M context) --- docs/paper/reductions.typ | 58 ++++ problemreductions-cli/src/commands/create.rs | 11 +- src/models/algebraic/equilibrium_point.rs | 270 ++++++++++++++++++ src/models/algebraic/mod.rs | 4 + src/models/mod.rs | 2 +- .../models/algebraic/equilibrium_point.rs | 193 +++++++++++++ 6 files changed, 536 insertions(+), 2 deletions(-) create mode 100644 src/models/algebraic/equilibrium_point.rs create mode 100644 src/unit_tests/models/algebraic/equilibrium_point.rs diff --git a/docs/paper/reductions.typ b/docs/paper/reductions.typ index 10ac82c7..143e5cda 100644 --- a/docs/paper/reductions.typ +++ b/docs/paper/reductions.typ @@ -181,6 +181,7 @@ "PreemptiveScheduling": [Preemptive Scheduling], "PrimeAttributeName": [Prime Attribute Name], "QuadraticAssignment": [Quadratic Assignment], + "EquilibriumPoint": [Equilibrium Point], "QuadraticCongruences": [Quadratic Congruences], "QuadraticDiophantineEquations": [Quadratic Diophantine Equations], "SimultaneousIncongruences": [Simultaneous Incongruences], @@ -3421,6 +3422,63 @@ A classical NP-complete problem from Garey and Johnson @garey1979[Ch.~3, p.~76], ] } +#{ + let x = load-model-example("EquilibriumPoint") + let polynomials = x.instance.polynomials + let range_sets = x.instance.range_sets + let config = x.optimal_config + let n = range_sets.len() + // Recover assignment: y_i = range_sets[i][config[i]] + let assignment = range(n).map(i => range_sets.at(i).at(config.at(i))) + // Helper: evaluate a single affine factor at assignment + let eval-factor(coeffs) = { + coeffs.at(0) + range(n).fold(0, (acc, j) => acc + coeffs.at(j + 1) * assignment.at(j)) + } + // Helper: evaluate F_i (product of all factors) + let eval-payoff(i) = { + polynomials.at(i).fold(1, (prod, f) => prod * eval-factor(f)) + } + [ + #problem-def("EquilibriumPoint")[ + Given $n$ players, finite strategy sets $M_i subset ZZ$, and polynomial payoff functions $F_i : M_1 times dots.c times M_n -> ZZ$ expressed as products of affine factors, determine whether there exists a pure-strategy Nash equilibrium: an assignment $bold(y) = (y_1, dots, y_n)$ with $y_i in M_i$ such that for every player $i$ and every $y'_i in M_i$, $F_i(bold(y)) >= F_i(bold(y)^((-i, y'_i)))$, where $bold(y)^((-i, y'_i))$ is $bold(y)$ with the $i$-th component replaced by $y'_i$. + ][ + Equilibrium Point (problem AN15 in Garey & Johnson @garey1979) is NP-complete by reduction from 3-SAT due to Sahni @sahni1974. The problem captures a fundamental question in algorithmic game theory: does a multi-player game with polynomial payoffs admit a stable strategy profile from which no player benefits by deviating? The payoff functions are represented as products of affine factors, enabling compact encoding of degree-$k$ polynomials with $O(k n)$ coefficients per player. The problem remains NP-complete even when all strategy sets are binary. + + *Example.* Consider $n = #n$ players with strategy sets $M_i = \{#range_sets.at(0).map(str).join(", ")\}$ for all $i$, and payoff functions: + + #let fmt-factors(factors) = { + factors.map(coeffs => { + // Format: a0 + a1*x1 + ... + an*xn, omit zero terms + let terms = (coeffs.at(0),) + range(n).map(j => coeffs.at(j+1)) + let parts = () + if terms.at(0) != 0 { parts.push(str(terms.at(0))) } + for j in range(n) { + let c = terms.at(j+1) + if c == 1 { parts.push("x" + str(j+1)) } + else if c == -1 { parts.push("-x" + str(j+1)) } + else if c != 0 { parts.push(str(c) + "x" + str(j+1)) } + } + if parts.len() == 0 { parts.push("0") } + "(" + parts.join(" + ") + ")" + }).join(sym.dot.op) + } + #align(center)[ + $F_1 = #fmt-factors(polynomials.at(0))$, + $F_2 = #fmt-factors(polynomials.at(1))$, + $F_3 = #fmt-factors(polynomials.at(2))$ + ] + + The assignment $bold(y) = (#assignment.map(str).join(", "))$ is a Nash equilibrium: $F_i(bold(y)) = #range(n).map(i => str(eval-payoff(i))).join(", ")$ for $i = 1, 2, 3$, and no player can strictly improve their payoff by deviating. + + #pred-commands( + "pred create --example EquilibriumPoint -o ep.json", + "pred solve ep.json --solver brute-force", + "pred evaluate ep.json --config " + config.map(str).join(","), + ) + ] + ] +} + #{ let x = load-model-example("ClosestVectorProblem") let basis = x.instance.basis diff --git a/problemreductions-cli/src/commands/create.rs b/problemreductions-cli/src/commands/create.rs index 14d86376..e0873659 100644 --- a/problemreductions-cli/src/commands/create.rs +++ b/problemreductions-cli/src/commands/create.rs @@ -9,7 +9,7 @@ use anyhow::{bail, Context, Result}; use problemreductions::export::{ModelExample, ProblemRef, ProblemSide, RuleExample}; use problemreductions::models::algebraic::{ ClosestVectorProblem, ConsecutiveBlockMinimization, ConsecutiveOnesMatrixAugmentation, - ConsecutiveOnesSubmatrix, FeasibleBasisExtension, QuadraticCongruences, + ConsecutiveOnesSubmatrix, EquilibriumPoint, FeasibleBasisExtension, QuadraticCongruences, QuadraticDiophantineEquations, SimultaneousIncongruences, SparseMatrixCompression, BMF, }; use problemreductions::models::formula::Quantifier; @@ -796,6 +796,7 @@ fn example_for(canonical: &str, graph_type: Option<&str>) -> &'static str { "--domain-size 6 --relations \"2:0,3|1,3|2,4;3:0,1,5|1,2,5\" --conjuncts-spec \"0:v0,c3;0:v1,c3;1:v0,v1,c5\"" } "ConjunctiveQueryFoldability" => "(use --example ConjunctiveQueryFoldability)", + "EquilibriumPoint" => "(use --example EquilibriumPoint)", "SequencingToMinimizeMaximumCumulativeCost" => { "--costs 2,-1,3,-2,1,-3 --precedence-pairs \"0>2,1>2,1>3,2>4,3>5,4>5\"" } @@ -4812,6 +4813,14 @@ pub fn create(args: &CreateArgs, out: &OutputConfig) -> Result<()> { ) } + "EquilibriumPoint" => { + bail!( + "EquilibriumPoint has complex nested input (polynomial factor lists).\n\n\ + Use: pred create --example EquilibriumPoint\n\ + Or provide a JSON file directly." + ) + } + // PartitionIntoPathsOfLength2 "PartitionIntoPathsOfLength2" => { let (graph, _) = parse_graph(args).map_err(|e| { diff --git a/src/models/algebraic/equilibrium_point.rs b/src/models/algebraic/equilibrium_point.rs new file mode 100644 index 00000000..d87f37c3 --- /dev/null +++ b/src/models/algebraic/equilibrium_point.rs @@ -0,0 +1,270 @@ +//! Equilibrium Point problem implementation. +//! +//! Given n players, polynomial payoff functions F_i, and finite strategy sets M_i, +//! determine whether there exists a pure-strategy Nash equilibrium: an assignment +//! y = (y_1, ..., y_n) with y_i ∈ M_i such that for every player i, +//! F_i(y) ≥ F_i(y with y_i replaced by any y' ∈ M_i). + +use crate::registry::{FieldInfo, ProblemSchemaEntry, ProblemSizeFieldEntry}; +use crate::traits::Problem; +use crate::types::Or; +use serde::de::Error as _; +use serde::{Deserialize, Deserializer, Serialize}; + +inventory::submit! { + ProblemSchemaEntry { + name: "EquilibriumPoint", + display_name: "Equilibrium Point", + aliases: &[], + dimensions: &[], + module_path: module_path!(), + description: "Decide whether a pure-strategy Nash equilibrium exists for a multi-player game with polynomial payoff functions", + fields: &[ + FieldInfo { + name: "polynomials", + type_name: "Vec>>", + description: "polynomials[i] is a list of affine factors for F_i; each factor [a0,a1,...,an] represents a0 + a1*x1 + ... + an*xn", + }, + FieldInfo { + name: "range_sets", + type_name: "Vec>", + description: "range_sets[i] is the finite strategy set M_i for player i", + }, + ], + } +} + +inventory::submit! { + ProblemSizeFieldEntry { + name: "EquilibriumPoint", + fields: &["num_players"], + } +} + +/// Equilibrium Point problem. +/// +/// Given n players, each with a finite strategy set M_i and a polynomial payoff +/// function F_i, decide whether there exists a pure-strategy Nash equilibrium: +/// an assignment y = (y_1, ..., y_n) with y_i ∈ M_i such that no player can +/// improve their payoff by unilaterally deviating. +/// +/// F_i is expressed as a product of affine factors. Each factor is represented +/// as a coefficient vector `[a0, a1, ..., an]` evaluating to +/// `a0 + a1*y_1 + ... + an*y_n`. +/// +/// # Configuration +/// +/// `config[i]` is an index into `range_sets[i]`; the assignment is +/// `y_i = range_sets[i][config[i]]`. +/// +/// # Example +/// +/// ``` +/// use problemreductions::models::algebraic::EquilibriumPoint; +/// use problemreductions::{Problem, Solver, BruteForce}; +/// +/// // 3 players, M_i = {0, 1} for all i. +/// // F1 = x1*x2*x3, F2 = (1-x1)*x2, F3 = x1*(1-x3) +/// let polynomials = vec![ +/// vec![vec![0,1,0,0], vec![0,0,1,0], vec![0,0,0,1]], +/// vec![vec![1,-1,0,0], vec![0,0,1,0]], +/// vec![vec![0,1,0,0], vec![1,0,0,-1]], +/// ]; +/// let range_sets = vec![vec![0,1], vec![0,1], vec![0,1]]; +/// let problem = EquilibriumPoint::new(polynomials, range_sets).unwrap(); +/// let solver = BruteForce::new(); +/// let witness = solver.find_witness(&problem); +/// assert!(witness.is_some()); +/// ``` +#[derive(Debug, Clone, Serialize)] +pub struct EquilibriumPoint { + /// polynomials[i] is a list of affine factors for F_i. + /// F_i(y) = product over all factors of (a0 + a1*y1 + ... + an*yn). + polynomials: Vec>>, + /// range_sets[i] is the finite strategy set M_i for player i. + range_sets: Vec>, +} + +impl EquilibriumPoint { + fn validate_inputs( + polynomials: &[Vec>], + range_sets: &[Vec], + ) -> Result<(), String> { + let n = polynomials.len(); + if range_sets.len() != n { + return Err(format!( + "polynomials has {n} entries but range_sets has {} entries; lengths must match", + range_sets.len() + )); + } + for (i, m) in range_sets.iter().enumerate() { + if m.is_empty() { + return Err(format!("range_sets[{i}] must be non-empty")); + } + } + // Each factor must have length n+1 (constant + one coefficient per player). + let expected_factor_len = n + 1; + for (i, factors) in polynomials.iter().enumerate() { + for (j, factor) in factors.iter().enumerate() { + if factor.len() != expected_factor_len { + return Err(format!( + "polynomials[{i}][{j}] has {} coefficients but expected {expected_factor_len} (1 + num_players)", + factor.len() + )); + } + } + } + Ok(()) + } + + /// Create a new `EquilibriumPoint` instance, returning an error on invalid input. + pub fn new(polynomials: Vec>>, range_sets: Vec>) -> Result { + Self::validate_inputs(&polynomials, &range_sets)?; + Ok(Self { + polynomials, + range_sets, + }) + } + + /// Get the number of players. + pub fn num_players(&self) -> usize { + self.polynomials.len() + } + + /// Get the polynomial factor lists. + pub fn polynomials(&self) -> &[Vec>] { + &self.polynomials + } + + /// Get the strategy sets. + pub fn range_sets(&self) -> &[Vec] { + &self.range_sets + } + + /// Evaluate F_i at a given assignment y (as i64 slice). + /// + /// Returns the product of all affine factors for player i. + fn eval_payoff(&self, player: usize, assignment: &[i64]) -> i64 { + let factors = &self.polynomials[player]; + if factors.is_empty() { + return 0; + } + factors.iter().fold(1i64, |prod, coeffs| { + // coeffs[0] + coeffs[1]*y_1 + ... + coeffs[n]*y_n + let val: i64 = coeffs[0] + + coeffs[1..] + .iter() + .zip(assignment.iter()) + .map(|(&c, &y)| c * y) + .sum::(); + prod * val + }) + } +} + +#[derive(Deserialize)] +struct EquilibriumPointData { + polynomials: Vec>>, + range_sets: Vec>, +} + +impl<'de> Deserialize<'de> for EquilibriumPoint { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let data = EquilibriumPointData::deserialize(deserializer)?; + Self::new(data.polynomials, data.range_sets).map_err(D::Error::custom) + } +} + +impl Problem for EquilibriumPoint { + const NAME: &'static str = "EquilibriumPoint"; + type Value = Or; + + fn variant() -> Vec<(&'static str, &'static str)> { + crate::variant_params![] + } + + fn dims(&self) -> Vec { + self.range_sets.iter().map(|m| m.len()).collect() + } + + fn evaluate(&self, config: &[usize]) -> Or { + let n = self.num_players(); + if config.len() != n { + return Or(false); + } + // Validate config indices are in-bounds. + for (i, &idx) in config.iter().enumerate() { + if idx >= self.range_sets[i].len() { + return Or(false); + } + } + + // Extract assignment y_i = range_sets[i][config[i]]. + let assignment: Vec = config + .iter() + .enumerate() + .map(|(i, &idx)| self.range_sets[i][idx]) + .collect(); + + // Check best-response condition for each player. + for i in 0..n { + let current_payoff = self.eval_payoff(i, &assignment); + // Try every y' in M_i for player i. + let mut best_response_satisfied = true; + for &alt in &self.range_sets[i] { + if alt == assignment[i] { + continue; + } + // Build alternative assignment with player i using alt. + let mut alt_assignment = assignment.clone(); + alt_assignment[i] = alt; + let alt_payoff = self.eval_payoff(i, &alt_assignment); + if alt_payoff > current_payoff { + best_response_satisfied = false; + break; + } + } + if !best_response_satisfied { + return Or(false); + } + } + Or(true) + } +} + +crate::declare_variants! { + default EquilibriumPoint => "2^num_players", +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_model_example_specs() -> Vec { + // 3 players, M_i = {0, 1} for all i. + // F1 = x1*x2*x3: factors [[0,1,0,0],[0,0,1,0],[0,0,0,1]] + // F2 = (1-x1)*x2: factors [[1,-1,0,0],[0,0,1,0]] + // F3 = x1*(1-x3): factors [[0,1,0,0],[1,0,0,-1]] + // + // config [0,1,0] → assignment (0,1,0). + // F1(0,1,0) = 0*1*0 = 0. Deviations for player 1: y'=1 → F1(1,1,0)=0. No improvement. + // F2(0,1,0) = (1-0)*1 = 1. Deviations for player 2: y'=0 → F2(0,0,0)=0. No improvement. + // F3(0,1,0) = 0*(1-0) = 0. Deviations for player 3: y'=1 → F3(0,1,1)=0. No improvement. + // → (0,1,0) is a Nash equilibrium. + let polynomials = vec![ + vec![vec![0, 1, 0, 0], vec![0, 0, 1, 0], vec![0, 0, 0, 1]], + vec![vec![1, -1, 0, 0], vec![0, 0, 1, 0]], + vec![vec![0, 1, 0, 0], vec![1, 0, 0, -1]], + ]; + let range_sets = vec![vec![0, 1], vec![0, 1], vec![0, 1]]; + vec![crate::example_db::specs::ModelExampleSpec { + id: "equilibrium_point", + instance: Box::new(EquilibriumPoint::new(polynomials, range_sets).unwrap()), + optimal_config: vec![0, 1, 0], + optimal_value: serde_json::json!(true), + }] +} + +#[cfg(test)] +#[path = "../../unit_tests/models/algebraic/equilibrium_point.rs"] +mod tests; diff --git a/src/models/algebraic/mod.rs b/src/models/algebraic/mod.rs index 9101e918..d95acf7c 100644 --- a/src/models/algebraic/mod.rs +++ b/src/models/algebraic/mod.rs @@ -7,6 +7,7 @@ //! - [`BMF`]: Boolean Matrix Factorization //! - [`ConsecutiveBlockMinimization`]: Consecutive Block Minimization //! - [`ConsecutiveOnesSubmatrix`]: Consecutive Ones Submatrix (column selection with C1P) +//! - [`EquilibriumPoint`]: Pure-strategy Nash Equilibrium existence //! - [`QuadraticAssignment`]: Quadratic Assignment Problem //! - [`QuadraticCongruences`]: Decide x² ≡ a (mod b) for x in {1, ..., c-1} //! - [`QuadraticDiophantineEquations`]: Decide ax² + by = c in positive integers @@ -18,6 +19,7 @@ pub(crate) mod closest_vector_problem; pub(crate) mod consecutive_block_minimization; pub(crate) mod consecutive_ones_matrix_augmentation; pub(crate) mod consecutive_ones_submatrix; +pub(crate) mod equilibrium_point; pub(crate) mod feasible_basis_extension; pub(crate) mod ilp; pub(crate) mod quadratic_assignment; @@ -32,6 +34,7 @@ pub use closest_vector_problem::{ClosestVectorProblem, VarBounds}; pub use consecutive_block_minimization::ConsecutiveBlockMinimization; pub use consecutive_ones_matrix_augmentation::ConsecutiveOnesMatrixAugmentation; pub use consecutive_ones_submatrix::ConsecutiveOnesSubmatrix; +pub use equilibrium_point::EquilibriumPoint; pub use feasible_basis_extension::FeasibleBasisExtension; pub use ilp::{Comparison, LinearConstraint, ObjectiveSense, VariableDomain, ILP}; pub use quadratic_assignment::QuadraticAssignment; @@ -55,6 +58,7 @@ pub(crate) fn canonical_model_example_specs() -> Vec EquilibriumPoint { + let polynomials = vec![ + vec![vec![0, 1, 0, 0], vec![0, 0, 1, 0], vec![0, 0, 0, 1]], + vec![vec![1, -1, 0, 0], vec![0, 0, 1, 0]], + vec![vec![0, 1, 0, 0], vec![1, 0, 0, -1]], + ]; + let range_sets = vec![vec![0, 1], vec![0, 1], vec![0, 1]]; + EquilibriumPoint::new(polynomials, range_sets).unwrap() +} + +/// Simple 2-player coordination game: F1 = F2 = x1*x2. +/// Both (0,0) and (1,1) are equilibria (payoff 0 each with M_i = {0,1}). +/// Actually F1(0,0)=0, dev to (1,0): F1(1,0)=0 — no strict improvement. +/// So (0,0) is an equilibrium trivially (no player can strictly improve). +fn coordination_problem() -> EquilibriumPoint { + // F1 = x1*x2: factors [[0,1,0,0],[0,0,1,0]] (but only constant + 2 player coeffs). + // Wait: 2 players → factor len = 3. + let polynomials = vec![ + vec![vec![0, 1, 0], vec![0, 0, 1]], // F1 = x1 * x2 + vec![vec![0, 1, 0], vec![0, 0, 1]], // F2 = x1 * x2 + ]; + let range_sets = vec![vec![0, 1], vec![0, 1]]; + EquilibriumPoint::new(polynomials, range_sets).unwrap() +} + +/// A 2-player constant-sum game: F1 = 1 (constant), F2 = 1 (constant). +/// Every config is a Nash equilibrium (no one can improve). +fn trivial_equilibrium_problem() -> EquilibriumPoint { + let polynomials = vec![ + vec![vec![1, 0, 0]], // F1 = 1 + vec![vec![1, 0, 0]], // F2 = 1 + ]; + let range_sets = vec![vec![0, 1], vec![0, 1]]; + EquilibriumPoint::new(polynomials, range_sets).unwrap() +} + +#[test] +fn test_equilibrium_point_creation_and_accessors() { + let p = canonical_problem(); + assert_eq!(p.num_players(), 3); + assert_eq!(p.polynomials().len(), 3); + assert_eq!(p.range_sets().len(), 3); + assert_eq!(p.range_sets()[0], vec![0, 1]); + assert_eq!(p.range_sets()[1], vec![0, 1]); + assert_eq!(p.range_sets()[2], vec![0, 1]); + assert_eq!(p.dims(), vec![2, 2, 2]); + assert_eq!(p.num_variables(), 3); + assert_eq!(::NAME, "EquilibriumPoint"); + assert_eq!(::variant(), vec![]); +} + +#[test] +fn test_equilibrium_point_evaluate_canonical_equilibrium() { + let p = canonical_problem(); + // config [0,1,0] → (0,1,0) is the known equilibrium. + assert_eq!(p.evaluate(&[0, 1, 0]), Or(true)); +} + +#[test] +fn test_equilibrium_point_evaluate_non_equilibria() { + let p = canonical_problem(); + // (1,1,1): F1=1. Dev player1 to 0: F1(0,1,1)=0 < 1 — no. Dev player2 to 0: F1(1,0,1)=0 <1 — no. Dev player3 to 0: F1(1,1,0)=0 <1 — no. + // F2=(1-1)*1=0. Dev player2 to 0: F2(1,0,1)=0 — no improvement. Player1 dev to 0: F2(0,1,1)=1*1=1 > 0 → NOT equilibrium for player 1 in F2? + // Wait F2 is player-2's payoff. Let me recheck: player 1 can deviate but that changes F2 too. + // Actually equilibrium condition: for player i, no y'_i in M_i gives F_i(y) < F_i(y with y_i=y'). + // At (1,1,1): F2(1,1,1)=(1-1)*1=0. Dev player2 to 0: F2(1,0,1)=(1-1)*0=0 — no improvement. + // But dev player1 (player 1) only affects F1! F1(1,1,1)=1*1*1=1, dev to 0: F1(0,1,1)=0*1*1=0 — no improvement for player1. + // F3(1,1,1)=1*(1-1)=0. Dev player3 to 0: F3(1,1,0)=1*(1-0)=1 > 0 → player 3 can improve! NOT equilibrium. + assert_eq!(p.evaluate(&[1, 1, 1]), Or(false)); + + // (0,0,0): F2(0,0,0)=(1-0)*0=0. Dev player2 to 1: F2(0,1,0)=(1-0)*1=1 > 0 → NOT equilibrium. + assert_eq!(p.evaluate(&[0, 0, 0]), Or(false)); +} + +#[test] +fn test_equilibrium_point_invalid_config_lengths() { + let p = canonical_problem(); + assert_eq!(p.evaluate(&[]), Or(false)); + assert_eq!(p.evaluate(&[0, 1]), Or(false)); + assert_eq!(p.evaluate(&[0, 1, 0, 0]), Or(false)); +} + +#[test] +fn test_equilibrium_point_brute_force_finds_witness() { + let solver = BruteForce::new(); + let p = canonical_problem(); + let witness = solver.find_witness(&p).unwrap(); + assert_eq!(p.evaluate(&witness), Or(true)); +} + +#[test] +fn test_equilibrium_point_coordination_game() { + let p = coordination_problem(); + // (0,0): F1(0,0)=0*0=0. Dev p1 to 1: F1(1,0)=1*0=0 — no improvement. Dev p2 to 1: F2(0,1)=0*1=0 — no improvement. + // So (0,0) is an equilibrium. + assert_eq!(p.evaluate(&[0, 0]), Or(true)); + // (1,1): F1(1,1)=1. Dev p1 to 0: F1(0,1)=0 < 1 — no improvement. Dev p2 to 0: F2(1,0)=0 < 1 — no improvement. + // (1,1) is also an equilibrium. + assert_eq!(p.evaluate(&[1, 1]), Or(true)); + // (0,1): F1(0,1)=0*1=0. Dev p1 to 1: F1(1,1)=1 > 0 → NOT equilibrium. + assert_eq!(p.evaluate(&[0, 1]), Or(false)); +} + +#[test] +fn test_equilibrium_point_trivial_constant_payoffs() { + let p = trivial_equilibrium_problem(); + // Every config is an equilibrium since payoff is always 1. + assert_eq!(p.evaluate(&[0, 0]), Or(true)); + assert_eq!(p.evaluate(&[0, 1]), Or(true)); + assert_eq!(p.evaluate(&[1, 0]), Or(true)); + assert_eq!(p.evaluate(&[1, 1]), Or(true)); +} + +#[test] +fn test_equilibrium_point_serialization() { + let p = canonical_problem(); + let json = serde_json::to_value(&p).unwrap(); + assert!(json.get("polynomials").is_some()); + assert!(json.get("range_sets").is_some()); + + let restored: EquilibriumPoint = serde_json::from_value(json).unwrap(); + assert_eq!(restored.num_players(), p.num_players()); + assert_eq!(restored.range_sets(), p.range_sets()); + assert_eq!(restored.polynomials(), p.polynomials()); +} + +#[test] +fn test_equilibrium_point_deserialization_rejects_invalid() { + // Mismatched polynomials and range_sets lengths. + let r: Result = serde_json::from_value(serde_json::json!({ + "polynomials": [[[0, 1, 0]]], + "range_sets": [[0, 1], [0, 1]] + })); + assert!(r.is_err()); + + // Empty range_set. + let r: Result = serde_json::from_value(serde_json::json!({ + "polynomials": [[[1, 0]]], + "range_sets": [[]] + })); + assert!(r.is_err()); + + // Wrong factor length (should be n+1 = 2, got 3). + let r: Result = serde_json::from_value(serde_json::json!({ + "polynomials": [[[0, 1, 2, 3]]], + "range_sets": [[0, 1]] + })); + assert!(r.is_err()); +} + +#[test] +fn test_equilibrium_point_paper_example() { + // Canonical example: config [0,1,0] is the equilibrium. + let p = canonical_problem(); + assert_eq!(p.evaluate(&[0, 1, 0]), Or(true)); + + let solver = BruteForce::new(); + let witness = solver.find_witness(&p).unwrap(); + assert_eq!(p.evaluate(&witness), Or(true)); +} + +#[test] +fn test_equilibrium_point_validation_panics() { + // polynomials length != range_sets length. + assert!( + EquilibriumPoint::new(vec![vec![vec![0, 1, 0]]], vec![vec![0, 1], vec![0, 1]],).is_err() + ); + + // Empty range_set. + assert!(EquilibriumPoint::new(vec![vec![vec![1, 0]]], vec![vec![]]).is_err()); + + // Factor has wrong length. + assert!(EquilibriumPoint::new(vec![vec![vec![0, 1, 2, 3]]], vec![vec![0, 1]],).is_err()); +} + +#[test] +fn test_equilibrium_point_single_player() { + // 1-player game: F1 = x1. M1 = {0, 2}. Equilibrium when player picks max. + // F1(0) = 0. Deviation to 2: F1(2) = 2 > 0 → config [0] NOT equilibrium. + // F1(2) = 2. No deviation improves. config [1] IS equilibrium. + let polynomials = vec![vec![vec![0, 1]]]; + let range_sets = vec![vec![0, 2]]; + let p = EquilibriumPoint::new(polynomials, range_sets).unwrap(); + assert_eq!(p.evaluate(&[0]), Or(false)); + assert_eq!(p.evaluate(&[1]), Or(true)); +} From 6d3e5d1fde30e8ed846ad83e25c57d312dbe60a7 Mon Sep 17 00:00:00 2001 From: Xiwei Pan Date: Mon, 30 Mar 2026 08:47:49 +0800 Subject: [PATCH 10/21] feat: add DirectedHamiltonianPath model (#813) Implement directed Hamiltonian path feasibility problem with ILP reduction, CLI support, canonical example, and paper entry. Co-Authored-By: Claude Opus 4.6 (1M context) --- docs/paper/reductions.typ | 41 ++++ problemreductions-cli/src/commands/create.rs | 22 +- src/models/graph/directed_hamiltonian_path.rs | 193 ++++++++++++++++++ src/models/graph/mod.rs | 4 + src/models/mod.rs | 6 +- src/rules/directedhamiltonianpath_ilp.rs | 122 +++++++++++ src/rules/mod.rs | 3 + .../models/graph/directed_hamiltonian_path.rs | 147 +++++++++++++ .../rules/directedhamiltonianpath_ilp.rs | 104 ++++++++++ 9 files changed, 637 insertions(+), 5 deletions(-) create mode 100644 src/models/graph/directed_hamiltonian_path.rs create mode 100644 src/rules/directedhamiltonianpath_ilp.rs create mode 100644 src/unit_tests/models/graph/directed_hamiltonian_path.rs create mode 100644 src/unit_tests/rules/directedhamiltonianpath_ilp.rs diff --git a/docs/paper/reductions.typ b/docs/paper/reductions.typ index 143e5cda..c1f02bc8 100644 --- a/docs/paper/reductions.typ +++ b/docs/paper/reductions.typ @@ -85,6 +85,7 @@ "HamiltonianCircuit": [Hamiltonian Circuit], "BiconnectivityAugmentation": [Biconnectivity Augmentation], "HamiltonianPath": [Hamiltonian Path], + "DirectedHamiltonianPath": [Directed Hamiltonian Path], "IntegralFlowBundles": [Integral Flow with Bundles], "LongestCircuit": [Longest Circuit], "LongestPath": [Longest Path], @@ -1112,6 +1113,29 @@ is feasible: each set induces a connected subgraph, the component weights are $2 ] ] } +#{ + let x = load-model-example("DirectedHamiltonianPath") + let nv = x.instance.graph.num_vertices + let arcs = x.instance.graph.arcs + [ + #problem-def("DirectedHamiltonianPath")[ + Given a directed graph $G = (V, A)$, determine whether $G$ contains a _directed Hamiltonian path_, i.e., a simple directed path that visits every vertex exactly once following arc directions. + ][ + A classical NP-complete decision problem from Garey & Johnson (A2.1 GT39). The directed version is NP-complete even for tournaments and remains hard for most restricted digraph classes. + + The best known exact algorithm runs in $O(n^2 dot 2^n)$ time using Held--Karp style dynamic programming with bitmask DP. + + Variables: A permutation of the $n$ vertices, encoded as a Lehmer code with $"dims" = [n, n-1, dots, 1]$. A configuration is satisfying when every consecutive pair in the decoded permutation forms a directed arc. + + *Example.* Consider the directed graph $G$ on #nv vertices with arcs ${#arcs.map(((u, v)) => $(#u arrow.r #v)$).join(", ")}$. The directed Hamiltonian path $0 arrow.r 1 arrow.r 3 arrow.r 2 arrow.r 4 arrow.r 5$ visits every vertex exactly once with all consecutive pairs being arcs. + + #pred-commands( + "pred create --example DirectedHamiltonianPath -o dhp.json", + "pred solve dhp.json", + ) + ] + ] +} #{ let x = load-model-example("LongestPath") let nv = graph-num-vertices(x.instance) @@ -10012,6 +10036,23 @@ The following reductions to Integer Linear Programming are straightforward formu _Solution extraction._ For each position $p$, output the unique vertex $v$ with $x_(v,p) = 1$. ] +#reduction-rule("DirectedHamiltonianPath", "ILP")[ + Assign each vertex to exactly one path position and forbid non-arc pairs at consecutive positions. +][ + _Construction._ Variables: binary $x_(v,k)$ with $x_(v,k) = 1$ iff vertex $v$ is placed at position $k$. The ILP is: + $ + "find" quad & bold(x) \ + "subject to" quad & sum_k x_(v,k) = 1 quad forall v \ + & sum_v x_(v,k) = 1 quad forall k \ + & x_(v,k) + x_(w,k+1) <= 1 quad forall k, (v, w) in.not A \ + & x_(v,k) in {0, 1} + $. + + _Correctness._ ($arrow.r.double$) A directed Hamiltonian path yields a permutation where every consecutive pair is a directed arc; the arc-exclusion constraints are satisfied by definition. ($arrow.l.double$) Any feasible ILP solution defines a vertex permutation whose consecutive pairs are all directed arcs, hence a directed Hamiltonian path. + + _Solution extraction._ For each position $k$, decode the unique vertex $v$ with $x_(v,k) = 1$ to recover the permutation; convert to Lehmer code for the source configuration. +] + #reduction-rule("BottleneckTravelingSalesman", "ILP")[ Use a cyclic position assignment for the tour and a bottleneck variable that dominates the weight of every chosen tour edge. ][ diff --git a/problemreductions-cli/src/commands/create.rs b/problemreductions-cli/src/commands/create.rs index e0873659..e23d4585 100644 --- a/problemreductions-cli/src/commands/create.rs +++ b/problemreductions-cli/src/commands/create.rs @@ -14,8 +14,8 @@ use problemreductions::models::algebraic::{ }; use problemreductions::models::formula::Quantifier; use problemreductions::models::graph::{ - DisjointConnectingPaths, GeneralizedHex, HamiltonianCircuit, HamiltonianPath, - IntegralFlowBundles, LengthBoundedDisjointPaths, LongestCircuit, LongestPath, + DirectedHamiltonianPath, DisjointConnectingPaths, GeneralizedHex, HamiltonianCircuit, + HamiltonianPath, IntegralFlowBundles, LengthBoundedDisjointPaths, LongestCircuit, LongestPath, MinimumCutIntoBoundedSets, MinimumDummyActivitiesPert, MinimumMultiwayCut, MixedChinesePostman, MultipleChoiceBranching, PathConstrainedNetworkFlow, RootedTreeArrangement, SteinerTree, SteinerTreeInGraphs, StrongConnectivityAugmentation, @@ -718,6 +718,9 @@ fn example_for(canonical: &str, graph_type: Option<&str>) -> &'static str { "--arcs \"0>2,0>3,1>2,1>3,2>4,2>5,3>4,3>5\" --capacities 1,1,1,1,1,1,1,1 --source-1 0 --sink-1 4 --source-2 1 --sink-2 5 --requirement-1 1 --requirement-2 1" } "MinimumFeedbackArcSet" => "--arcs \"0>1,1>2,2>0\"", + "DirectedHamiltonianPath" => { + "--arcs \"0>1,0>3,1>3,1>4,2>0,2>4,3>2,3>5,4>5,5>1\" --num-vertices 6" + } "MinimumDummyActivitiesPert" => "--arcs \"0>2,0>3,1>3,1>4,2>5\" --num-vertices 6", "RegisterSufficiency" => { "--arcs \"2>0,2>1,3>1,4>2,4>3,5>0,6>4,6>5\" --bound 3 --num-vertices 7" @@ -4490,6 +4493,21 @@ pub fn create(args: &CreateArgs, out: &OutputConfig) -> Result<()> { ) } + // DirectedHamiltonianPath + "DirectedHamiltonianPath" => { + let arcs_str = args.arcs.as_deref().ok_or_else(|| { + anyhow::anyhow!( + "DirectedHamiltonianPath requires --arcs\n\n\ + Usage: pred create DirectedHamiltonianPath --arcs \"0>1,1>2,2>3\" [--num-vertices N]" + ) + })?; + let (graph, _) = parse_directed_graph(arcs_str, args.num_vertices)?; + ( + ser(DirectedHamiltonianPath::new(graph))?, + resolved_variant.clone(), + ) + } + // AcyclicPartition "AcyclicPartition" => { let usage = "Usage: pred create AcyclicPartition/i32 --arcs \"0>1,0>2,1>3,1>4,2>4,2>5,3>5,4>5\" --weights 2,3,2,1,3,1 --arc-costs 1,1,1,1,1,1,1,1 --weight-bound 5 --cost-bound 5"; diff --git a/src/models/graph/directed_hamiltonian_path.rs b/src/models/graph/directed_hamiltonian_path.rs new file mode 100644 index 00000000..b395853c --- /dev/null +++ b/src/models/graph/directed_hamiltonian_path.rs @@ -0,0 +1,193 @@ +//! Directed Hamiltonian Path problem implementation. +//! +//! The Directed Hamiltonian Path problem asks whether a directed graph contains +//! a simple directed path that visits every vertex exactly once. + +use crate::registry::{FieldInfo, ProblemSchemaEntry, VariantDimension}; +use crate::topology::DirectedGraph; +use crate::traits::Problem; +use serde::{Deserialize, Serialize}; + +inventory::submit! { + ProblemSchemaEntry { + name: "DirectedHamiltonianPath", + display_name: "Directed Hamiltonian Path", + aliases: &["DHP"], + dimensions: &[ + VariantDimension::new("graph", "DirectedGraph", &["DirectedGraph"]), + ], + module_path: module_path!(), + description: "Does the directed graph contain a Hamiltonian path?", + fields: &[ + FieldInfo { name: "graph", type_name: "DirectedGraph", description: "The directed graph G=(V,A)" }, + ], + } +} + +/// The Directed Hamiltonian Path problem. +/// +/// Given a directed graph G = (V, A), determine whether G contains a Hamiltonian path, +/// i.e., a simple directed path that visits every vertex exactly once following arc +/// directions. +/// +/// # Representation +/// +/// A configuration encodes a permutation using the Lehmer code: +/// `dims() = [n, n-1, ..., 2, 1]`, yielding `n!` reachable configurations. +/// Each configuration is decoded to a permutation of `0..n`, and a solution is +/// valid when every consecutive pair `(path[i], path[i+1])` is an arc in the +/// directed graph. +/// +/// # Example +/// +/// ``` +/// use problemreductions::models::graph::DirectedHamiltonianPath; +/// use problemreductions::topology::DirectedGraph; +/// use problemreductions::{Problem, Solver, BruteForce}; +/// +/// // Simple directed path: 0->1->2->3 +/// let graph = DirectedGraph::new(4, vec![(0, 1), (1, 2), (2, 3)]); +/// let problem = DirectedHamiltonianPath::new(graph); +/// +/// let solver = BruteForce::new(); +/// let solution = solver.find_witness(&problem); +/// assert!(solution.is_some()); +/// ``` +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct DirectedHamiltonianPath { + graph: DirectedGraph, +} + +impl DirectedHamiltonianPath { + /// Create a new Directed Hamiltonian Path problem from a directed graph. + pub fn new(graph: DirectedGraph) -> Self { + Self { graph } + } + + /// Get a reference to the underlying directed graph. + pub fn graph(&self) -> &DirectedGraph { + &self.graph + } + + /// Get the number of vertices in the directed graph. + pub fn num_vertices(&self) -> usize { + self.graph.num_vertices() + } + + /// Get the number of arcs in the directed graph. + pub fn num_arcs(&self) -> usize { + self.graph.num_arcs() + } + + /// Check if a configuration is a valid directed Hamiltonian path. + pub fn is_valid_solution(&self, config: &[usize]) -> bool { + let perm = decode_lehmer(config); + is_valid_directed_hamiltonian_path(&self.graph, &perm) + } +} + +impl Problem for DirectedHamiltonianPath { + const NAME: &'static str = "DirectedHamiltonianPath"; + type Value = crate::types::Or; + + fn variant() -> Vec<(&'static str, &'static str)> { + crate::variant_params![] + } + + fn dims(&self) -> Vec { + lehmer_dims(self.graph.num_vertices()) + } + + fn evaluate(&self, config: &[usize]) -> crate::types::Or { + let perm = decode_lehmer(config); + crate::types::Or(is_valid_directed_hamiltonian_path(&self.graph, &perm)) + } +} + +/// Returns the Lehmer code dimension vector for `n` items: `[n, n-1, ..., 2, 1]`. +pub(crate) fn lehmer_dims(n: usize) -> Vec { + (1..=n).rev().collect() +} + +/// Decode a Lehmer code into a permutation. +/// +/// Given a configuration `code` where `code[i] < n - i`, returns the +/// corresponding permutation of `0..n`. +pub(crate) fn decode_lehmer(code: &[usize]) -> Vec { + let n = code.len(); + let mut available: Vec = (0..n).collect(); + let mut perm = Vec::with_capacity(n); + for &idx in code { + let idx = idx.min(available.len().saturating_sub(1)); + perm.push(available.remove(idx)); + } + perm +} + +/// Check if a permutation is a valid directed Hamiltonian path. +/// +/// A valid directed Hamiltonian path visits every vertex exactly once and +/// every consecutive pair `(perm[i], perm[i+1])` must be an arc in the graph. +pub(crate) fn is_valid_directed_hamiltonian_path(graph: &DirectedGraph, perm: &[usize]) -> bool { + let n = graph.num_vertices(); + if perm.len() != n { + return false; + } + + // Check that perm is a valid permutation of 0..n + let mut seen = vec![false; n]; + for &v in perm { + if v >= n || seen[v] { + return false; + } + seen[v] = true; + } + + // Check that consecutive pairs are directed arcs + for i in 0..n.saturating_sub(1) { + if !graph.has_arc(perm[i], perm[i + 1]) { + return false; + } + } + + true +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_model_example_specs() -> Vec { + use crate::rules::ilp_helpers::permutation_to_lehmer; + + // 6 vertices, arcs from issue #813 + // Hamiltonian path: [0, 1, 3, 2, 4, 5] + let graph = DirectedGraph::new( + 6, + vec![ + (0, 1), + (0, 3), + (1, 3), + (1, 4), + (2, 0), + (2, 4), + (3, 2), + (3, 5), + (4, 5), + (5, 1), + ], + ); + let optimal_perm = vec![0usize, 1, 3, 2, 4, 5]; + let optimal_config = permutation_to_lehmer(&optimal_perm); + vec![crate::example_db::specs::ModelExampleSpec { + id: "directed_hamiltonian_path", + instance: Box::new(DirectedHamiltonianPath::new(graph)), + optimal_config, + optimal_value: serde_json::json!(true), + }] +} + +crate::declare_variants! { + default DirectedHamiltonianPath => "num_vertices^2 * 2^num_vertices", +} + +#[cfg(test)] +#[path = "../../unit_tests/models/graph/directed_hamiltonian_path.rs"] +mod tests; diff --git a/src/models/graph/mod.rs b/src/models/graph/mod.rs index 36e98e3d..54ed94ca 100644 --- a/src/models/graph/mod.rs +++ b/src/models/graph/mod.rs @@ -2,6 +2,7 @@ //! //! Problems whose input is a graph (optionally weighted): //! - [`AcyclicPartition`]: Partition a digraph into bounded-weight groups with an acyclic quotient graph +//! - [`DirectedHamiltonianPath`]: Directed Hamiltonian path (decision problem) //! - [`MaximumIndependentSet`]: Maximum weight independent set //! - [`MaximalIS`]: Maximal independent set //! - [`MinimumVertexCover`]: Minimum weight vertex cover @@ -60,6 +61,7 @@ pub(crate) mod biclique_cover; pub(crate) mod biconnectivity_augmentation; pub(crate) mod bottleneck_traveling_salesman; pub(crate) mod bounded_component_spanning_forest; +pub(crate) mod directed_hamiltonian_path; pub(crate) mod directed_two_commodity_integral_flow; pub(crate) mod disjoint_connecting_paths; pub(crate) mod generalized_hex; @@ -115,6 +117,7 @@ pub use biclique_cover::BicliqueCover; pub use biconnectivity_augmentation::BiconnectivityAugmentation; pub use bottleneck_traveling_salesman::BottleneckTravelingSalesman; pub use bounded_component_spanning_forest::BoundedComponentSpanningForest; +pub use directed_hamiltonian_path::DirectedHamiltonianPath; pub use directed_two_commodity_integral_flow::DirectedTwoCommodityIntegralFlow; pub use disjoint_connecting_paths::DisjointConnectingPaths; pub use generalized_hex::GeneralizedHex; @@ -168,6 +171,7 @@ pub use undirected_two_commodity_integral_flow::UndirectedTwoCommodityIntegralFl pub(crate) fn canonical_model_example_specs() -> Vec { let mut specs = Vec::new(); specs.extend(acyclic_partition::canonical_model_example_specs()); + specs.extend(directed_hamiltonian_path::canonical_model_example_specs()); specs.extend(maximum_independent_set::canonical_model_example_specs()); specs.extend(minimum_vertex_cover::canonical_model_example_specs()); specs.extend(max_cut::canonical_model_example_specs()); diff --git a/src/models/mod.rs b/src/models/mod.rs index 65088314..88d02c8e 100644 --- a/src/models/mod.rs +++ b/src/models/mod.rs @@ -21,9 +21,9 @@ pub use formula::{ }; pub use graph::{ AcyclicPartition, BalancedCompleteBipartiteSubgraph, BicliqueCover, BiconnectivityAugmentation, - BottleneckTravelingSalesman, BoundedComponentSpanningForest, DirectedTwoCommodityIntegralFlow, - DisjointConnectingPaths, GeneralizedHex, HamiltonianCircuit, HamiltonianPath, - IntegralFlowBundles, IntegralFlowHomologousArcs, IntegralFlowWithMultipliers, + BottleneckTravelingSalesman, BoundedComponentSpanningForest, DirectedHamiltonianPath, + DirectedTwoCommodityIntegralFlow, DisjointConnectingPaths, GeneralizedHex, HamiltonianCircuit, + HamiltonianPath, IntegralFlowBundles, IntegralFlowHomologousArcs, IntegralFlowWithMultipliers, IsomorphicSpanningTree, KClique, KColoring, KthBestSpanningTree, LengthBoundedDisjointPaths, LongestCircuit, LongestPath, MaxCut, MaximalIS, MaximumClique, MaximumIndependentSet, MaximumMatching, MinMaxMulticenter, MinimumCutIntoBoundedSets, MinimumDominatingSet, diff --git a/src/rules/directedhamiltonianpath_ilp.rs b/src/rules/directedhamiltonianpath_ilp.rs new file mode 100644 index 00000000..33a7040c --- /dev/null +++ b/src/rules/directedhamiltonianpath_ilp.rs @@ -0,0 +1,122 @@ +//! Reduction from DirectedHamiltonianPath to ILP (Integer Linear Programming). +//! +//! Position-assignment formulation: +//! - Binary x_{v,k}: vertex v at position k, total n^2 variables +//! - Assignment: each vertex in exactly one position, each position exactly one vertex +//! - Arc existence: for each consecutive position pair (k, k+1), any pair (v, w) where +//! (v, w) is NOT a directed arc is forbidden: x_{v,k} + x_{w,k+1} <= 1 + +use crate::models::algebraic::{LinearConstraint, ObjectiveSense, ILP}; +use crate::models::graph::DirectedHamiltonianPath; +use crate::reduction; +use crate::rules::ilp_helpers::{ + one_hot_assignment_constraints, one_hot_decode, permutation_to_lehmer, +}; +use crate::rules::traits::{ReduceTo, ReductionResult}; + +/// Result of reducing DirectedHamiltonianPath to ILP. +/// +/// Variable layout (all binary): +/// - `x_{v,k}` at index `v * n + k` for `v, k in 0..n` +#[derive(Debug, Clone)] +pub struct ReductionDirectedHamiltonianPathToILP { + target: ILP, + num_vertices: usize, +} + +impl ReductionResult for ReductionDirectedHamiltonianPathToILP { + type Source = DirectedHamiltonianPath; + type Target = ILP; + + fn target_problem(&self) -> &ILP { + &self.target + } + + fn extract_solution(&self, target_solution: &[usize]) -> Vec { + let n = self.num_vertices; + // Decode one-hot assignment: permutation[k] = v where x_{v,k} = 1 + let perm = one_hot_decode(target_solution, n, n, 0); + permutation_to_lehmer(&perm) + } +} + +#[reduction( + overhead = { + num_vars = "num_vertices^2", + num_constraints = "2 * num_vertices + (num_vertices - 1) * (num_vertices^2 - num_arcs)", + } +)] +impl ReduceTo> for DirectedHamiltonianPath { + type Result = ReductionDirectedHamiltonianPathToILP; + + fn reduce_to(&self) -> Self::Result { + let n = self.num_vertices(); + let arcs = self.graph().arcs(); + + // Build arc set for fast lookup + let mut arc_set = std::collections::HashSet::new(); + for (u, v) in &arcs { + arc_set.insert((*u, *v)); + } + + let x_idx = |v: usize, k: usize| -> usize { v * n + k }; + + let mut constraints = Vec::new(); + + // (1) Assignment: each vertex at exactly one position, each position exactly one vertex + // Both row-wise (vertex) and column-wise (position) equality constraints + constraints.extend(one_hot_assignment_constraints(n, n, 0)); + // The helper adds: each item in exactly one slot (row equality), each slot at most one item + // But we need each slot exactly one item. Upgrade le to eq for the column constraints: + // one_hot_assignment_constraints gives: row eq + col le + // We need col eq, so add col ge (col le + col ge = col eq) + for k in 0..n { + let terms: Vec<(usize, f64)> = (0..n).map(|v| (x_idx(v, k), 1.0)).collect(); + constraints.push(LinearConstraint::ge(terms, 1.0)); + } + + // (2) Arc existence: for each consecutive position pair (k, k+1), + // forbid (v, w) pairs that are NOT arcs: x_{v,k} + x_{w,k+1} <= 1 + if n >= 2 { + for k in 0..n - 1 { + for v in 0..n { + for w in 0..n { + if !arc_set.contains(&(v, w)) { + constraints.push(LinearConstraint::le( + vec![(x_idx(v, k), 1.0), (x_idx(w, k + 1), 1.0)], + 1.0, + )); + } + } + } + } + } + + // Feasibility objective + let target = ILP::new(n * n, constraints, vec![], ObjectiveSense::Minimize); + + ReductionDirectedHamiltonianPathToILP { + target, + num_vertices: n, + } + } +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_rule_example_specs() -> Vec { + vec![crate::example_db::specs::RuleExampleSpec { + id: "directedhamiltonianpath_to_ilp", + build: || { + // Simple directed path: 0->1->2->3 + let source = DirectedHamiltonianPath::new(crate::topology::DirectedGraph::new( + 4, + vec![(0, 1), (1, 2), (2, 3)], + )); + crate::example_db::specs::rule_example_via_ilp::<_, bool>(source) + }, + }] +} + +#[cfg(test)] +#[path = "../unit_tests/rules/directedhamiltonianpath_ilp.rs"] +mod tests; diff --git a/src/rules/mod.rs b/src/rules/mod.rs index 17e91265..a7057952 100644 --- a/src/rules/mod.rs +++ b/src/rules/mod.rs @@ -102,6 +102,8 @@ pub(crate) mod consecutiveonessubmatrix_ilp; #[cfg(feature = "ilp-solver")] pub(crate) mod consistencyofdatabasefrequencytables_ilp; #[cfg(feature = "ilp-solver")] +pub(crate) mod directedhamiltonianpath_ilp; +#[cfg(feature = "ilp-solver")] pub(crate) mod directedtwocommodityintegralflow_ilp; #[cfg(feature = "ilp-solver")] pub(crate) mod disjointconnectingpaths_ilp; @@ -334,6 +336,7 @@ pub(crate) fn canonical_rule_example_specs() -> Vec Vec { + permutation_to_lehmer(perm) +} + +#[test] +fn test_directed_hamiltonian_path_creation() { + // Simple directed path: 0->1->2->3 + let graph = DirectedGraph::new(4, vec![(0, 1), (1, 2), (2, 3)]); + let problem = DirectedHamiltonianPath::new(graph); + assert_eq!(problem.num_vertices(), 4); + assert_eq!(problem.num_arcs(), 3); + // Lehmer dims: [4, 3, 2, 1] + assert_eq!(problem.dims(), vec![4, 3, 2, 1]); +} + +#[test] +fn test_directed_hamiltonian_path_evaluate_valid() { + // Directed path: 0->1->2->3 + let graph = DirectedGraph::new(4, vec![(0, 1), (1, 2), (2, 3)]); + let problem = DirectedHamiltonianPath::new(graph); + + // Path [0, 1, 2, 3]: Lehmer code [0, 0, 0, 0] + assert_eq!( + problem.evaluate(&encode(&[0, 1, 2, 3])), + crate::types::Or(true) + ); + // Path [3, 2, 1, 0]: no arcs in reverse, invalid + assert_eq!( + problem.evaluate(&encode(&[3, 2, 1, 0])), + crate::types::Or(false) + ); +} + +#[test] +fn test_directed_hamiltonian_path_evaluate_invalid_no_arc() { + // Only arc 0->1 and 2->3, not 1->2 + let graph = DirectedGraph::new(4, vec![(0, 1), (2, 3)]); + let problem = DirectedHamiltonianPath::new(graph); + // No Hamiltonian path should be valid + let solver = BruteForce::new(); + assert!(solver.find_witness(&problem).is_none()); +} + +#[test] +fn test_directed_hamiltonian_path_brute_force() { + // Simple directed path graph + let graph = DirectedGraph::new(4, vec![(0, 1), (1, 2), (2, 3)]); + let problem = DirectedHamiltonianPath::new(graph); + let solver = BruteForce::new(); + let solution = solver + .find_witness(&problem) + .expect("should have a Hamiltonian path"); + assert_eq!(problem.evaluate(&solution), crate::types::Or(true)); +} + +#[test] +fn test_directed_hamiltonian_path_issue_example() { + // 6-vertex example from issue #813 + // Arcs: [(0,1),(0,3),(1,3),(1,4),(2,0),(2,4),(3,2),(3,5),(4,5),(5,1)] + // Hamiltonian path: [0, 1, 3, 2, 4, 5] + let graph = DirectedGraph::new( + 6, + vec![ + (0, 1), + (0, 3), + (1, 3), + (1, 4), + (2, 0), + (2, 4), + (3, 2), + (3, 5), + (4, 5), + (5, 1), + ], + ); + let problem = DirectedHamiltonianPath::new(graph); + let path = vec![0usize, 1, 3, 2, 4, 5]; + assert_eq!( + problem.evaluate(&encode(&path)), + crate::types::Or(true), + "Path [0,1,3,2,4,5] should be a valid Hamiltonian path" + ); +} + +#[test] +fn test_directed_hamiltonian_path_no_solution() { + // Directed graph with no Hamiltonian path: 0->1, 0->2, no outgoing from 1 or 2 + let graph = DirectedGraph::new(3, vec![(0, 1), (0, 2)]); + let problem = DirectedHamiltonianPath::new(graph); + let solver = BruteForce::new(); + assert!(solver.find_witness(&problem).is_none()); +} + +#[test] +fn test_directed_hamiltonian_path_single_vertex() { + let graph = DirectedGraph::new(1, vec![]); + let problem = DirectedHamiltonianPath::new(graph); + // Single vertex: trivially Hamiltonian + assert_eq!(problem.evaluate(&[0]), crate::types::Or(true)); + let solver = BruteForce::new(); + let sol = solver.find_witness(&problem); + assert!(sol.is_some()); +} + +#[test] +fn test_directed_hamiltonian_path_serialization() { + let graph = DirectedGraph::new(3, vec![(0, 1), (1, 2)]); + let problem = DirectedHamiltonianPath::new(graph); + let json = serde_json::to_value(&problem).unwrap(); + let deserialized: DirectedHamiltonianPath = serde_json::from_value(json).unwrap(); + assert_eq!(deserialized.num_vertices(), 3); + assert_eq!(deserialized.num_arcs(), 2); +} + +#[test] +fn test_is_valid_solution() { + let graph = DirectedGraph::new(3, vec![(0, 1), (1, 2)]); + let problem = DirectedHamiltonianPath::new(graph); + // Valid: path [0, 1, 2] + assert!(problem.is_valid_solution(&encode(&[0, 1, 2]))); + // Invalid: path [0, 2, 1] (no arc 0->2) + assert!(!problem.is_valid_solution(&encode(&[0, 2, 1]))); +} + +#[test] +fn test_size_getters() { + let graph = DirectedGraph::new(5, vec![(0, 1), (1, 2), (2, 3), (3, 4)]); + let problem = DirectedHamiltonianPath::new(graph); + assert_eq!(problem.num_vertices(), 5); + assert_eq!(problem.num_arcs(), 4); + // Lehmer dims: [5, 4, 3, 2, 1] + assert_eq!(problem.dims(), vec![5, 4, 3, 2, 1]); +} + +#[test] +fn test_decode_lehmer_identity() { + // Lehmer code [0,0,...,0] should decode to identity permutation [0,1,...,n-1] + let code = vec![0usize; 5]; + let perm = decode_lehmer(&code); + assert_eq!(perm, vec![0, 1, 2, 3, 4]); +} diff --git a/src/unit_tests/rules/directedhamiltonianpath_ilp.rs b/src/unit_tests/rules/directedhamiltonianpath_ilp.rs new file mode 100644 index 00000000..e13a8532 --- /dev/null +++ b/src/unit_tests/rules/directedhamiltonianpath_ilp.rs @@ -0,0 +1,104 @@ +use super::*; +use crate::models::graph::DirectedHamiltonianPath; +use crate::solvers::{BruteForce, ILPSolver}; +use crate::topology::DirectedGraph; +use crate::traits::Problem; +use crate::types::Or; + +#[test] +fn test_reduction_creates_valid_ilp() { + // Directed path: 0->1->2 (n=3) + let graph = DirectedGraph::new(3, vec![(0, 1), (1, 2)]); + let problem = DirectedHamiltonianPath::new(graph); + let reduction: ReductionDirectedHamiltonianPathToILP = + ReduceTo::>::reduce_to(&problem); + let ilp = reduction.target_problem(); + // n=3, num_vars = 3^2 = 9 + assert_eq!(ilp.num_vars, 9); + assert_eq!(ilp.sense, ObjectiveSense::Minimize); +} + +#[test] +fn test_directedhamiltonianpath_to_ilp_closed_loop() { + // Directed path: 0->1->2->3 + let graph = DirectedGraph::new(4, vec![(0, 1), (1, 2), (2, 3)]); + let problem = DirectedHamiltonianPath::new(graph); + + // BruteForce to verify feasibility + let bf = BruteForce::new(); + let bf_solution = bf + .find_witness(&problem) + .expect("brute-force should find a solution"); + assert_eq!(problem.evaluate(&bf_solution), Or(true)); + + // Solve via ILP + let reduction: ReductionDirectedHamiltonianPathToILP = + ReduceTo::>::reduce_to(&problem); + let ilp_solver = ILPSolver::new(); + let ilp_solution = ilp_solver + .solve(reduction.target_problem()) + .expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + assert_eq!( + problem.evaluate(&extracted), + Or(true), + "ILP solution should satisfy the DirectedHamiltonianPath constraint" + ); +} + +#[test] +fn test_directedhamiltonianpath_to_ilp_issue_example() { + // 6-vertex example from issue #813 + let graph = DirectedGraph::new( + 6, + vec![ + (0, 1), + (0, 3), + (1, 3), + (1, 4), + (2, 0), + (2, 4), + (3, 2), + (3, 5), + (4, 5), + (5, 1), + ], + ); + let problem = DirectedHamiltonianPath::new(graph); + let reduction: ReductionDirectedHamiltonianPathToILP = + ReduceTo::>::reduce_to(&problem); + let ilp_solver = ILPSolver::new(); + let ilp_solution = ilp_solver + .solve(reduction.target_problem()) + .expect("ILP should find a path"); + let extracted = reduction.extract_solution(&ilp_solution); + assert_eq!( + problem.evaluate(&extracted), + Or(true), + "ILP solution should be a valid directed Hamiltonian path" + ); +} + +#[test] +fn test_directedhamiltonianpath_to_ilp_no_path() { + // No Hamiltonian path: 0->1, 0->2, but no outgoing arcs from 1 or 2 + let graph = DirectedGraph::new(3, vec![(0, 1), (0, 2)]); + let problem = DirectedHamiltonianPath::new(graph); + let reduction: ReductionDirectedHamiltonianPathToILP = + ReduceTo::>::reduce_to(&problem); + let ilp_solver = ILPSolver::new(); + let result = ilp_solver.solve(reduction.target_problem()); + assert!( + result.is_none(), + "Graph with no Hamiltonian path should be infeasible" + ); +} + +#[test] +fn test_directedhamiltonianpath_to_ilp_bf_vs_ilp() { + let graph = DirectedGraph::new(4, vec![(0, 1), (1, 2), (2, 3)]); + let problem = DirectedHamiltonianPath::new(graph); + let reduction: ReductionDirectedHamiltonianPathToILP = + ReduceTo::>::reduce_to(&problem); + crate::rules::test_helpers::assert_bf_vs_ilp(&problem, &reduction); +} From 850aa3c6ec0d272ae164e11bc4f9b315348789b1 Mon Sep 17 00:00:00 2001 From: Xiwei Pan Date: Mon, 30 Mar 2026 09:09:12 +0800 Subject: [PATCH 11/21] feat: add SetSplitting model (#830) Implement hypergraph 2-colorability / set splitting feasibility problem (GJ SP4) with ILP reduction, CLI support, canonical example, and paper entry. Co-Authored-By: Claude Opus 4.6 (1M context) --- docs/paper/reductions.typ | 64 +++++++ problemreductions-cli/src/commands/create.rs | 16 ++ src/lib.rs | 2 +- src/models/mod.rs | 2 +- src/models/set/mod.rs | 4 + src/models/set/set_splitting.rs | 173 +++++++++++++++++++ src/rules/mod.rs | 3 + src/rules/setsplitting_ilp.rs | 81 +++++++++ src/unit_tests/models/set/set_splitting.rs | 114 ++++++++++++ src/unit_tests/rules/setsplitting_ilp.rs | 101 +++++++++++ 10 files changed, 558 insertions(+), 2 deletions(-) create mode 100644 src/models/set/set_splitting.rs create mode 100644 src/rules/setsplitting_ilp.rs create mode 100644 src/unit_tests/models/set/set_splitting.rs create mode 100644 src/unit_tests/rules/setsplitting_ilp.rs diff --git a/docs/paper/reductions.typ b/docs/paper/reductions.typ index c1f02bc8..bfb9edde 100644 --- a/docs/paper/reductions.typ +++ b/docs/paper/reductions.typ @@ -108,6 +108,7 @@ "MinimumSetCovering": [Minimum Set Covering], "ComparativeContainment": [Comparative Containment], "SetBasis": [Set Basis], + "SetSplitting": [Set Splitting], "MinimumCardinalityKey": [Minimum Cardinality Key], "SpinGlass": [Spin Glass], "QUBO": [QUBO], @@ -2640,6 +2641,53 @@ A classical NP-complete problem from Garey and Johnson @garey1979[Ch.~3, p.~76], ] } +#{ + let x = load-model-example("SetSplitting") + let subsets = x.instance.subsets + let m = subsets.len() + let n = x.instance.universe_size + let sol = (config: x.optimal_config, metric: x.optimal_value) + let part0 = sol.config.enumerate().filter(((i, v)) => v == 0).map(((i, _)) => i) + let part1 = sol.config.enumerate().filter(((i, v)) => v == 1).map(((i, _)) => i) + let fmt-set(s) = "${" + s.map(e => str(e + 1)).join(", ") + "}$" + let elems = ( + (-1.5, 0.0), + (0.0, 1.0), + (0.0, -1.0), + (1.5, 0.0), + (-0.5, -1.0), + (0.5, 1.0), + ) + [ + #problem-def("SetSplitting")[ + Given a finite universe $U$ and a collection $cal(C) = {C_1, dots, C_m}$ of subsets of $U$ each of size $gt.eq 2$, does there exist a 2-coloring $chi: U -> {0, 1}$ such that every $C_i$ is non-monochromatic — i.e., contains at least one element of each color? + ][ + One of Garey and Johnson's NP-complete problems (SP4 in @garey1979), shown NP-complete by reduction from 3-SAT. It is equivalent to deciding whether a hypergraph is 2-colorable (also called Property B). The problem is trivially satisfiable when every subset has size exactly 2, reducing to 2-colorability of the corresponding graph; it becomes NP-complete for subsets of size $gt.eq 3$. The best known exact algorithm runs in $O^*(2^n)$ by brute-force enumeration over the $n = |U|$ elements. + + *Example.* Let $U = {1, 2, dots, #n}$ and $cal(C) = {C_1, dots, C_#m}$ with #range(m).map(i => $C_#(i + 1) = #fmt-set(subsets.at(i))$).join(", "). Coloring $S_1 = #fmt-set(part0)$ and $S_2 = #fmt-set(part1)$ splits all subsets: each $C_i$ has at least one element in each part. + + #pred-commands( + "pred create --example SetSplitting -o set-splitting.json", + "pred solve set-splitting.json", + "pred evaluate set-splitting.json --config " + x.optimal_config.map(str).join(","), + ) + + #figure( + canvas(length: 1cm, { + for (i, subset) in subsets.enumerate() { + let positions = subset.map(e => elems.at(e)) + sregion(positions, pad: 0.42, label: [$C_#(i + 1)$], ..sregion-dimmed) + } + for (k, pos) in elems.enumerate() { + selem(pos, label: [#(k + 1)], fill: if part1.contains(k) { graph-colors.at(0) } else { white }) + } + }), + caption: [Set splitting: white elements $#fmt-set(part0)$ form $S_1$ and blue elements $#fmt-set(part1)$ form $S_2$; every subset $C_1, dots, C_#m$ contains both colors.] + ) + ] + ] +} + #{ let x = load-model-example("ConsecutiveSets") let m = x.instance.alphabet_size @@ -9219,6 +9267,22 @@ The following reductions to Integer Linear Programming are straightforward formu _Solution extraction._ Direct: $x_i = 1$ iff variable $i$ is true. ] +#reduction-rule("SetSplitting", "ILP")[ + Each subset must contain at least one element of each color, encoded as a pair of linear inequalities per subset. +][ + _Construction._ Variables: $x_i in {0, 1}$ for each $u_i in U$ ($x_i = 1$ means $u_i in S_2$). For each subset $C = {i_1, dots, i_k}$ the ILP enforces: + $ + "find" quad & bold(x) \ + "subject to" quad & sum_(j in C) x_j >= 1 quad "for each" C in cal(C) \ + & sum_(j in C) x_j <= |C| - 1 quad "for each" C in cal(C) \ + & x_i in {0, 1} quad forall i + $. + + _Correctness._ ($arrow.r.double$) A valid splitting has at least one element in $S_2$ ($sum >= 1$) and at least one in $S_1$ ($sum <= |C|-1$) for every $C$. ($arrow.l.double$) Any feasible ILP solution defines a valid 2-coloring. + + _Solution extraction._ $S_2 = {u_i : x_i = 1}$, $S_1 = U without S_2$. +] + #reduction-rule("KClique", "ILP")[ A $k$-clique requires at least $k$ selected vertices with no non-edge between any pair. ][ diff --git a/problemreductions-cli/src/commands/create.rs b/problemreductions-cli/src/commands/create.rs index e23d4585..47c345c2 100644 --- a/problemreductions-cli/src/commands/create.rs +++ b/problemreductions-cli/src/commands/create.rs @@ -769,6 +769,7 @@ fn example_for(canonical: &str, graph_type: Option<&str>) -> &'static str { "--universe 4 --r-sets \"0,1,2,3;0,1\" --s-sets \"0,1,2,3;2,3\" --r-weights 2,5 --s-weights 3,6" } "SetBasis" => "--universe 4 --sets \"0,1;1,2;0,2;0,1,2\" --k 3", + "SetSplitting" => "--universe 6 --sets \"0,1,2;2,3,4;0,4,5;1,3,5\"", "LongestCommonSubsequence" => { "--strings \"010110;100101;001011\" --alphabet-size 2" } @@ -2652,6 +2653,21 @@ pub fn create(args: &CreateArgs, out: &OutputConfig) -> Result<()> { ) } + // SetSplitting + "SetSplitting" => { + let universe = args.universe.ok_or_else(|| { + anyhow::anyhow!( + "SetSplitting requires --universe and --sets\n\n\ + Usage: pred create SetSplitting --universe 6 --sets \"0,1,2;2,3,4;0,4,5;1,3,5\"" + ) + })?; + let subsets = parse_sets(args)?; + ( + ser(SetSplitting::try_new(universe, subsets).map_err(anyhow::Error::msg)?)?, + resolved_variant.clone(), + ) + } + // MinimumHittingSet "MinimumHittingSet" => { let universe = args.universe.ok_or_else(|| { diff --git a/src/lib.rs b/src/lib.rs index 8a99b593..6c1af959 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -85,7 +85,7 @@ pub mod prelude { pub use crate::models::set::{ ComparativeContainment, ConsecutiveSets, ExactCoverBy3Sets, IntegerKnapsack, MaximumSetPacking, MinimumCardinalityKey, MinimumHittingSet, MinimumSetCovering, - PrimeAttributeName, RootedTreeStorageAssignment, SetBasis, + PrimeAttributeName, RootedTreeStorageAssignment, SetBasis, SetSplitting, }; // Core traits diff --git a/src/models/mod.rs b/src/models/mod.rs index 88d02c8e..74584b00 100644 --- a/src/models/mod.rs +++ b/src/models/mod.rs @@ -56,5 +56,5 @@ pub use misc::{ pub use set::{ ComparativeContainment, ConsecutiveSets, ExactCoverBy3Sets, IntegerKnapsack, MaximumSetPacking, MinimumCardinalityKey, MinimumHittingSet, MinimumSetCovering, PrimeAttributeName, - RootedTreeStorageAssignment, SetBasis, TwoDimensionalConsecutiveSets, + RootedTreeStorageAssignment, SetBasis, SetSplitting, TwoDimensionalConsecutiveSets, }; diff --git a/src/models/set/mod.rs b/src/models/set/mod.rs index cd71cd52..785fde23 100644 --- a/src/models/set/mod.rs +++ b/src/models/set/mod.rs @@ -10,6 +10,7 @@ //! - [`MinimumSetCovering`]: Minimum weight set cover //! - [`PrimeAttributeName`]: Determine if an attribute belongs to any candidate key //! - [`RootedTreeStorageAssignment`]: Extend subsets to directed tree paths within a total-cost bound +//! - [`SetSplitting`]: 2-color universe so every specified subset is non-monochromatic pub(crate) mod comparative_containment; pub(crate) mod consecutive_sets; @@ -22,6 +23,7 @@ pub(crate) mod minimum_set_covering; pub(crate) mod prime_attribute_name; pub(crate) mod rooted_tree_storage_assignment; pub(crate) mod set_basis; +pub(crate) mod set_splitting; pub(crate) mod two_dimensional_consecutive_sets; pub use comparative_containment::ComparativeContainment; @@ -35,6 +37,7 @@ pub use minimum_set_covering::MinimumSetCovering; pub use prime_attribute_name::PrimeAttributeName; pub use rooted_tree_storage_assignment::RootedTreeStorageAssignment; pub use set_basis::SetBasis; +pub use set_splitting::SetSplitting; pub use two_dimensional_consecutive_sets::TwoDimensionalConsecutiveSets; #[cfg(feature = "example-db")] @@ -51,6 +54,7 @@ pub(crate) fn canonical_model_example_specs() -> Vec>", description: "Subsets that must each contain elements from both parts" }, + ], + } +} + +/// The Set Splitting problem. +/// +/// Given a finite universe $U = \{0, \ldots, n-1\}$ and a collection +/// $\mathcal{C}$ of subsets of $U$, decide whether there exists a +/// 2-coloring (partition into $S_1$ and $S_2$) of $U$ such that every +/// subset in $\mathcal{C}$ is non-monochromatic, i.e., contains at +/// least one element from each part. +/// +/// # Example +/// +/// ``` +/// use problemreductions::models::set::SetSplitting; +/// use problemreductions::{Problem, Solver, BruteForce}; +/// +/// // Universe {0,1,2,3,4,5}, subsets that all must be split +/// let problem = SetSplitting::new(6, vec![ +/// vec![0, 1, 2], +/// vec![2, 3, 4], +/// vec![0, 4, 5], +/// vec![1, 3, 5], +/// ]); +/// +/// let solver = BruteForce::new(); +/// let witness = solver.find_witness(&problem); +/// assert!(witness.is_some()); +/// ``` +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(try_from = "SetSplittingDef")] +pub struct SetSplitting { + /// Size of the universe. + universe_size: usize, + /// Subsets that must each contain elements from both parts. + subsets: Vec>, +} + +impl SetSplitting { + /// Create a new Set Splitting problem. + /// + /// # Panics + /// + /// Panics if any subset is empty, has fewer than 2 elements, or contains an + /// element outside the universe. + pub fn new(universe_size: usize, subsets: Vec>) -> Self { + Self::try_new(universe_size, subsets).unwrap_or_else(|err| panic!("{err}")) + } + + /// Create a new Set Splitting problem, returning an error instead of panicking. + pub fn try_new(universe_size: usize, subsets: Vec>) -> Result { + for (i, subset) in subsets.iter().enumerate() { + if subset.len() < 2 { + return Err(format!( + "Subset {} has {} element(s), expected at least 2", + i, + subset.len() + )); + } + for &elem in subset { + if elem >= universe_size { + return Err(format!( + "Subset {} contains element {} which is outside universe of size {}", + i, elem, universe_size + )); + } + } + } + Ok(Self { + universe_size, + subsets, + }) + } + + /// Get the size of the universe. + pub fn universe_size(&self) -> usize { + self.universe_size + } + + /// Get the number of subsets. + pub fn num_subsets(&self) -> usize { + self.subsets.len() + } + + /// Get the subsets. + pub fn subsets(&self) -> &[Vec] { + &self.subsets + } + + /// Check if a coloring (config) splits all subsets. + pub fn is_valid_solution(&self, config: &[usize]) -> bool { + self.evaluate(config).0 + } +} + +impl Problem for SetSplitting { + const NAME: &'static str = "SetSplitting"; + type Value = crate::types::Or; + + fn dims(&self) -> Vec { + vec![2; self.universe_size] + } + + fn evaluate(&self, config: &[usize]) -> crate::types::Or { + crate::types::Or(self.subsets.iter().all(|subset| { + let has_zero = subset.iter().any(|&e| config[e] == 0); + let has_one = subset.iter().any(|&e| config[e] == 1); + has_zero && has_one + })) + } + + fn variant() -> Vec<(&'static str, &'static str)> { + crate::variant_params![] + } +} + +crate::declare_variants! { + default SetSplitting => "2^universe_size", +} + +#[derive(Debug, Clone, Deserialize)] +struct SetSplittingDef { + universe_size: usize, + subsets: Vec>, +} + +impl TryFrom for SetSplitting { + type Error = String; + + fn try_from(value: SetSplittingDef) -> Result { + Self::try_new(value.universe_size, value.subsets) + } +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_model_example_specs() -> Vec { + vec![crate::example_db::specs::ModelExampleSpec { + id: "set_splitting", + instance: Box::new(SetSplitting::new( + 6, + vec![vec![0, 1, 2], vec![2, 3, 4], vec![0, 4, 5], vec![1, 3, 5]], + )), + // config[i]=0 means element i in S1, config[i]=1 means element i in S2 + // S1={1,3,4}, S2={0,2,5} → config [1,0,1,0,0,1] + optimal_config: vec![1, 0, 1, 0, 0, 1], + optimal_value: serde_json::json!(true), + }] +} + +#[cfg(test)] +#[path = "../../unit_tests/models/set/set_splitting.rs"] +mod tests; diff --git a/src/rules/mod.rs b/src/rules/mod.rs index a7057952..8a726061 100644 --- a/src/rules/mod.rs +++ b/src/rules/mod.rs @@ -232,6 +232,8 @@ pub(crate) mod sequencingwithinintervals_ilp; #[cfg(feature = "ilp-solver")] pub(crate) mod sequencingwithreleasetimesanddeadlines_ilp; #[cfg(feature = "ilp-solver")] +pub(crate) mod setsplitting_ilp; +#[cfg(feature = "ilp-solver")] pub(crate) mod shortestcommonsupersequence_ilp; #[cfg(feature = "ilp-solver")] pub(crate) mod shortestweightconstrainedpath_ilp; @@ -402,6 +404,7 @@ pub(crate) fn canonical_rule_example_specs() -> Vec, +} + +impl ReductionResult for ReductionSetSplittingToILP { + type Source = SetSplitting; + type Target = ILP; + + fn target_problem(&self) -> &ILP { + &self.target + } + + fn extract_solution(&self, target_solution: &[usize]) -> Vec { + target_solution.to_vec() + } +} + +#[reduction( + overhead = { + num_vars = "universe_size", + num_constraints = "2 * num_subsets", + } +)] +impl ReduceTo> for SetSplitting { + type Result = ReductionSetSplittingToILP; + + fn reduce_to(&self) -> Self::Result { + let num_vars = self.universe_size(); + let mut constraints = Vec::new(); + + for subset in self.subsets() { + let k = subset.len(); + let terms: Vec<(usize, f64)> = subset.iter().map(|&e| (e, 1.0)).collect(); + + // At least one element in S2: sum >= 1 + constraints.push(LinearConstraint::ge(terms.clone(), 1.0)); + + // At least one element in S1: sum <= k - 1 + constraints.push(LinearConstraint::le(terms, (k - 1) as f64)); + } + + let target = ILP::new(num_vars, constraints, vec![], ObjectiveSense::Minimize); + ReductionSetSplittingToILP { target } + } +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_rule_example_specs() -> Vec { + vec![crate::example_db::specs::RuleExampleSpec { + id: "setsplitting_to_ilp", + build: || { + let source = SetSplitting::new( + 6, + vec![vec![0, 1, 2], vec![2, 3, 4], vec![0, 4, 5], vec![1, 3, 5]], + ); + crate::example_db::specs::rule_example_via_ilp::<_, bool>(source) + }, + }] +} + +#[cfg(test)] +#[path = "../unit_tests/rules/setsplitting_ilp.rs"] +mod tests; diff --git a/src/unit_tests/models/set/set_splitting.rs b/src/unit_tests/models/set/set_splitting.rs new file mode 100644 index 00000000..fb197e92 --- /dev/null +++ b/src/unit_tests/models/set/set_splitting.rs @@ -0,0 +1,114 @@ +use super::*; +use crate::solvers::BruteForce; +use crate::traits::Problem; +use crate::types::Or; + +#[test] +fn test_set_splitting_creation() { + let problem = SetSplitting::new(4, vec![vec![0, 1], vec![1, 2], vec![2, 3]]); + assert_eq!(problem.universe_size(), 4); + assert_eq!(problem.num_subsets(), 3); + assert_eq!(problem.num_variables(), 4); +} + +#[test] +fn test_set_splitting_getters() { + let subsets = vec![vec![0, 1, 2], vec![1, 2, 3]]; + let problem = SetSplitting::new(4, subsets.clone()); + assert_eq!(problem.subsets(), subsets.as_slice()); + assert_eq!(problem.num_subsets(), 2); +} + +#[test] +fn test_set_splitting_evaluate_valid() { + // Universe {0,1,2,3}, one subset {0,1,2,3} + // config [0,0,1,1] → subset has {0,1} in S1 and {2,3} in S2 → split + let problem = SetSplitting::new(4, vec![vec![0, 1, 2, 3]]); + assert_eq!(problem.evaluate(&[0, 0, 1, 1]), Or(true)); +} + +#[test] +fn test_set_splitting_evaluate_monochromatic() { + // All elements colored 0 — subset is entirely in S1 → not split + let problem = SetSplitting::new(4, vec![vec![0, 1, 2]]); + assert_eq!(problem.evaluate(&[0, 0, 0, 0]), Or(false)); +} + +#[test] +fn test_set_splitting_evaluate_multiple_subsets() { + // Universe {0..5}, 4 subsets from canonical example + let problem = SetSplitting::new( + 6, + vec![vec![0, 1, 2], vec![2, 3, 4], vec![0, 4, 5], vec![1, 3, 5]], + ); + // config [1,0,1,0,0,1]: S1={1,3,4}, S2={0,2,5} + let config = vec![1, 0, 1, 0, 0, 1]; + assert_eq!(problem.evaluate(&config), Or(true)); + + // All 0: every subset is monochromatic + let all_zero = vec![0, 0, 0, 0, 0, 0]; + assert_eq!(problem.evaluate(&all_zero), Or(false)); +} + +#[test] +fn test_set_splitting_is_valid_solution() { + let problem = SetSplitting::new(4, vec![vec![0, 1], vec![2, 3]]); + assert!(problem.is_valid_solution(&[0, 1, 0, 1])); + assert!(!problem.is_valid_solution(&[0, 0, 0, 0])); +} + +#[test] +fn test_set_splitting_brute_force_feasible() { + let problem = SetSplitting::new( + 6, + vec![vec![0, 1, 2], vec![2, 3, 4], vec![0, 4, 5], vec![1, 3, 5]], + ); + let solver = BruteForce::new(); + let witness = solver.find_witness(&problem); + assert!(witness.is_some()); + let w = witness.unwrap(); + assert_eq!(problem.evaluate(&w), Or(true)); +} + +#[test] +fn test_set_splitting_brute_force_infeasible() { + // Single subset {0,1} with only 2 elements — either 0 or 1 must differ + // Actually {0,1} is always splittable: just color 0→0, 1→1. + // Infeasible instance: single element subset is rejected by constructor (< 2 elems). + // An instance with contradictory constraints: + // U={0}, subset {0,0} is rejected (too few distinct, but actually passes len check) + // Really simplest infeasible: universe_size=1, subset {0,0} has length 2 but only 1 unique. + // Both elements map to index 0 → sum is either 0 (all S1) or 2 (all S2), never split. + // config [0]: elem 0 → S1. subset needs both colors but only has elem 0 twice → impossible. + let problem = SetSplitting::new(1, vec![vec![0, 0]]); + let solver = BruteForce::new(); + let witness = solver.find_witness(&problem); + assert!( + witness.is_none(), + "single-element universe cannot split {{0,0}}" + ); +} + +#[test] +fn test_set_splitting_serialization() { + let problem = SetSplitting::new(4, vec![vec![0, 1], vec![2, 3]]); + let json = serde_json::to_string(&problem).unwrap(); + let deserialized: SetSplitting = serde_json::from_str(&json).unwrap(); + assert_eq!(deserialized.universe_size(), 4); + assert_eq!(deserialized.num_subsets(), 2); + assert_eq!(deserialized.subsets(), problem.subsets()); +} + +#[test] +fn test_set_splitting_try_new_invalid_element() { + let result = SetSplitting::try_new(3, vec![vec![0, 5]]); + assert!(result.is_err()); + assert!(result.unwrap_err().contains("outside universe")); +} + +#[test] +fn test_set_splitting_try_new_too_small_subset() { + let result = SetSplitting::try_new(3, vec![vec![0]]); + assert!(result.is_err()); + assert!(result.unwrap_err().contains("at least 2")); +} diff --git a/src/unit_tests/rules/setsplitting_ilp.rs b/src/unit_tests/rules/setsplitting_ilp.rs new file mode 100644 index 00000000..b15762c8 --- /dev/null +++ b/src/unit_tests/rules/setsplitting_ilp.rs @@ -0,0 +1,101 @@ +use super::*; +use crate::solvers::{BruteForce, ILPSolver}; +use crate::traits::Problem; +use crate::types::Or; + +#[test] +fn test_reduction_creates_valid_ilp() { + // Universe {0,1,2}, subset {0,1,2} + let problem = SetSplitting::new(3, vec![vec![0, 1, 2]]); + let reduction: ReductionSetSplittingToILP = ReduceTo::>::reduce_to(&problem); + let ilp = reduction.target_problem(); + + assert_eq!(ilp.num_vars, 3, "one ILP var per universe element"); + assert_eq!( + ilp.constraints.len(), + 2, + "two constraints per subset (ge + le)" + ); + assert_eq!(ilp.sense, ObjectiveSense::Minimize); + assert!(ilp.objective.is_empty(), "feasibility: no objective terms"); +} + +#[test] +fn test_reduction_constraint_structure() { + // Subset {0,1,2}: need sum >= 1 and sum <= 2 + let problem = SetSplitting::new(3, vec![vec![0, 1, 2]]); + let reduction: ReductionSetSplittingToILP = ReduceTo::>::reduce_to(&problem); + let ilp = reduction.target_problem(); + + // One ge constraint (rhs=1) and one le constraint (rhs=2) + let ge_constraints: Vec<_> = ilp.constraints.iter().filter(|c| c.rhs == 1.0).collect(); + let le_constraints: Vec<_> = ilp.constraints.iter().filter(|c| c.rhs == 2.0).collect(); + assert_eq!(ge_constraints.len(), 1); + assert_eq!(le_constraints.len(), 1); +} + +#[test] +fn test_setsplitting_to_ilp_closed_loop() { + // Canonical 4-subset instance, feasible + let problem = SetSplitting::new( + 6, + vec![vec![0, 1, 2], vec![2, 3, 4], vec![0, 4, 5], vec![1, 3, 5]], + ); + let reduction: ReductionSetSplittingToILP = ReduceTo::>::reduce_to(&problem); + let ilp = reduction.target_problem(); + + let ilp_solver = ILPSolver::new(); + let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be feasible"); + let extracted = reduction.extract_solution(&ilp_solution); + + assert_eq!( + problem.evaluate(&extracted), + Or(true), + "extracted solution must split all subsets" + ); +} + +#[test] +fn test_setsplitting_to_ilp_infeasible() { + // Single-element universe, subset {0,0}: sum(x_0) >= 1 and sum(x_0) <= 0 — contradiction + let problem = SetSplitting::new(1, vec![vec![0, 0]]); + let reduction: ReductionSetSplittingToILP = ReduceTo::>::reduce_to(&problem); + let ilp = reduction.target_problem(); + + let ilp_solver = ILPSolver::new(); + assert!( + ilp_solver.solve(ilp).is_none(), + "ILP should be infeasible for unsplittable instance" + ); +} + +#[test] +fn test_setsplitting_bf_vs_ilp() { + let problem = SetSplitting::new(4, vec![vec![0, 1], vec![1, 2], vec![2, 3]]); + let bf = BruteForce::new(); + let ilp_solver = ILPSolver::new(); + + let bf_witness = bf.find_witness(&problem); + assert!(bf_witness.is_some()); + let bf_result = problem.evaluate(&bf_witness.unwrap()); + + let reduction: ReductionSetSplittingToILP = ReduceTo::>::reduce_to(&problem); + let ilp_solution = ilp_solver + .solve(reduction.target_problem()) + .expect("ILP should be feasible"); + let extracted = reduction.extract_solution(&ilp_solution); + let ilp_result = problem.evaluate(&extracted); + + assert_eq!(bf_result, ilp_result, "BruteForce and ILP must agree"); + assert_eq!(ilp_result, Or(true)); +} + +#[test] +fn test_overhead_dimensions() { + // 5 elements, 3 subsets → 5 vars, 6 constraints + let problem = SetSplitting::new(5, vec![vec![0, 1], vec![2, 3], vec![0, 4]]); + let reduction: ReductionSetSplittingToILP = ReduceTo::>::reduce_to(&problem); + let ilp = reduction.target_problem(); + assert_eq!(ilp.num_vars, 5); + assert_eq!(ilp.constraints.len(), 6); // 2 per subset +} From a0071b8e63400f033b91cc883750975d1b7f11a8 Mon Sep 17 00:00:00 2001 From: Xiwei Pan Date: Mon, 30 Mar 2026 09:26:47 +0800 Subject: [PATCH 12/21] feat: add HamiltonianPathBetweenTwoVertices model (#831) Implement the fixed-endpoint Hamiltonian path problem (GJ Chapter 3) with brute-force solver, CLI support, canonical example, and paper entry. Co-Authored-By: Claude Opus 4.6 (1M context) --- docs/paper/reductions.typ | 52 ++++ problemreductions-cli/src/cli.rs | 1 + problemreductions-cli/src/commands/create.rs | 72 +++++- src/lib.rs | 9 +- .../hamiltonian_path_between_two_vertices.rs | 238 ++++++++++++++++++ src/models/graph/mod.rs | 4 + src/models/mod.rs | 24 +- .../hamiltonian_path_between_two_vertices.rs | 163 ++++++++++++ 8 files changed, 543 insertions(+), 20 deletions(-) create mode 100644 src/models/graph/hamiltonian_path_between_two_vertices.rs create mode 100644 src/unit_tests/models/graph/hamiltonian_path_between_two_vertices.rs diff --git a/docs/paper/reductions.typ b/docs/paper/reductions.typ index bfb9edde..4ccf8a79 100644 --- a/docs/paper/reductions.typ +++ b/docs/paper/reductions.typ @@ -85,6 +85,7 @@ "HamiltonianCircuit": [Hamiltonian Circuit], "BiconnectivityAugmentation": [Biconnectivity Augmentation], "HamiltonianPath": [Hamiltonian Path], + "HamiltonianPathBetweenTwoVertices": [Hamiltonian Path Between Two Vertices], "DirectedHamiltonianPath": [Directed Hamiltonian Path], "IntegralFlowBundles": [Integral Flow with Bundles], "LongestCircuit": [Longest Circuit], @@ -1114,6 +1115,57 @@ is feasible: each set induces a connected subgraph, the component weights are $2 ] ] } +#{ + let x = load-model-example("HamiltonianPathBetweenTwoVertices") + let nv = graph-num-vertices(x.instance) + let ne = graph-num-edges(x.instance) + let edges = x.instance.graph.edges + let s = x.instance.source_vertex + let t = x.instance.target_vertex + let sol = (config: x.optimal_config, metric: x.optimal_value) + let path = sol.config + let path-edges = range(path.len() - 1).map(i => (path.at(i), path.at(i + 1))) + [ + #problem-def("HamiltonianPathBetweenTwoVertices")[ + Given a graph $G = (V, E)$ and two distinguished vertices $s, t in V$, determine whether $G$ contains a _Hamiltonian s–t path_, i.e., a simple path that begins at $s$, ends at $t$, and visits every vertex of $G$ exactly once. + ][ + A classical NP-complete decision problem from Garey & Johnson (GT39, p. 60), closely related to _Hamiltonian Path_. Fixing the two endpoints of a Hamiltonian path gives a natural and useful specialisation: any Hamiltonian circuit can be converted into a Hamiltonian s–t path by removing the edge $(s, t)$, and conversely a Hamiltonian s–t path plus the edge $(s, t)$ yields a Hamiltonian circuit. The problem remains NP-complete for planar graphs and bipartite graphs. + + The best known exact algorithm is Björklund's randomized $O^*(1.657^n)$ "Determinant Sums" method @bjorklund2014. The classical Held--Karp dynamic programming algorithm solves it in $O(n^2 dot 2^n)$ deterministic time by initialising the DP only at $s$ and accepting only solutions that terminate at $t$ @heldkarp1962. + + Variables: $n = |V|$ values forming a permutation. Position $i$ holds the vertex visited at step $i$. A configuration is satisfying when it forms a valid permutation of all vertices, the first element is $s$, the last element is $t$, and consecutive vertices are adjacent in $G$. + + *Example.* Consider the graph $G$ on #nv vertices with edges ${#edges.map(((u, v)) => $\{#u, #v\}$).join(", ")}$, source $s = #s$, and target $t = #t$. The sequence $[#path.map(v => str(v)).join(", ")]$ is a Hamiltonian $s$–$t$ path: it starts at $s$, ends at $t$, visits every vertex exactly once, and each consecutive pair is adjacent --- #path-edges.map(((u, v)) => $\{#u, #v\}$).join($,$) $in E$. + + #pred-commands( + "pred create --example HamiltonianPathBetweenTwoVertices -o hpbtv.json", + "pred solve hpbtv.json", + "pred evaluate hpbtv.json --config " + x.optimal_config.map(str).join(","), + ) + + #figure({ + let blue = graph-colors.at(0) + let gray = luma(200) + canvas(length: 1cm, { + import draw: * + let verts = ((0, 1.5), (1.5, 1.5), (3, 1.5), (1.5, 0), (3, 0), (0, 0)) + for (u, v) in edges { + let on-path = path-edges.any(e => (e.at(0) == u and e.at(1) == v) or (e.at(0) == v and e.at(1) == u)) + g-edge(verts.at(u), verts.at(v), stroke: if on-path { 2pt + blue } else { 1pt + gray }) + } + for (k, pos) in verts.enumerate() { + let is-endpoint = k == s or k == t + g-node(pos, name: "v" + str(k), + fill: if is-endpoint { graph-colors.at(1) } else { blue }, + label: text(fill: white)[$v_#k$]) + } + }) + }, + caption: [Hamiltonian $s$–$t$ path in a #{nv}-vertex graph with $s = v_#s$ and $t = v_#t$ (orange). Blue edges show the path $#path.map(v => $v_#v$).join($arrow$)$.], + ) + ] + ] +} #{ let x = load-model-example("DirectedHamiltonianPath") let nv = x.instance.graph.num_vertices diff --git a/problemreductions-cli/src/cli.rs b/problemreductions-cli/src/cli.rs index 664c4654..6f21cc7e 100644 --- a/problemreductions-cli/src/cli.rs +++ b/problemreductions-cli/src/cli.rs @@ -218,6 +218,7 @@ Flags by problem type: MIS, MVC, MaxClique, MinDomSet --graph, --weights MaxCut, MaxMatching, TSP, BottleneckTravelingSalesman --graph, --edge-weights LongestPath --graph, --edge-lengths, --source-vertex, --target-vertex + HamiltonianPathBetweenTwoVertices --graph, --source-vertex, --target-vertex ShortestWeightConstrainedPath --graph, --edge-lengths, --edge-weights, --source-vertex, --target-vertex, --weight-bound MaximalIS --graph, --weights SAT, NAESAT --num-vars, --clauses diff --git a/problemreductions-cli/src/commands/create.rs b/problemreductions-cli/src/commands/create.rs index 47c345c2..f4ca81ef 100644 --- a/problemreductions-cli/src/commands/create.rs +++ b/problemreductions-cli/src/commands/create.rs @@ -15,10 +15,11 @@ use problemreductions::models::algebraic::{ use problemreductions::models::formula::Quantifier; use problemreductions::models::graph::{ DirectedHamiltonianPath, DisjointConnectingPaths, GeneralizedHex, HamiltonianCircuit, - HamiltonianPath, IntegralFlowBundles, LengthBoundedDisjointPaths, LongestCircuit, LongestPath, - MinimumCutIntoBoundedSets, MinimumDummyActivitiesPert, MinimumMultiwayCut, MixedChinesePostman, - MultipleChoiceBranching, PathConstrainedNetworkFlow, RootedTreeArrangement, SteinerTree, - SteinerTreeInGraphs, StrongConnectivityAugmentation, + HamiltonianPath, HamiltonianPathBetweenTwoVertices, IntegralFlowBundles, + LengthBoundedDisjointPaths, LongestCircuit, LongestPath, MinimumCutIntoBoundedSets, + MinimumDummyActivitiesPert, MinimumMultiwayCut, MixedChinesePostman, MultipleChoiceBranching, + PathConstrainedNetworkFlow, RootedTreeArrangement, SteinerTree, SteinerTreeInGraphs, + StrongConnectivityAugmentation, }; use problemreductions::models::misc::{ AdditionalKey, BinPacking, BoyceCoddNormalFormViolation, CapacityAssignment, CbqRelation, @@ -617,6 +618,9 @@ fn example_for(canonical: &str, graph_type: Option<&str>) -> &'static str { "--graph 0-1,1-2,2-3,3-4,4-5,5-6,6-7,0-7,1-5,2-6 --weights 2,3,1,2,3,1,2,1 --k 3 --bound 6" } "HamiltonianPath" => "--graph 0-1,1-2,2-3", + "HamiltonianPathBetweenTwoVertices" => { + "--graph 0-1,0-3,1-2,1-4,2-5,3-4,4-5,2-3 --source-vertex 0 --target-vertex 5" + } "LongestPath" => { "--graph 0-1,0-2,1-3,2-3,2-4,3-5,4-5,4-6,5-6,1-6 --edge-lengths 3,2,4,1,5,2,3,2,4,1 --source-vertex 0 --target-vertex 6" } @@ -1503,6 +1507,37 @@ pub fn create(args: &CreateArgs, out: &OutputConfig) -> Result<()> { (ser(HamiltonianPath::new(graph))?, resolved_variant.clone()) } + // Hamiltonian path between two specified vertices + "HamiltonianPathBetweenTwoVertices" => { + let usage = "pred create HamiltonianPathBetweenTwoVertices --graph 0-1,0-3,1-2,1-4,2-5,3-4,4-5,2-3 --source-vertex 0 --target-vertex 5"; + let (graph, _) = + parse_graph(args).map_err(|e| anyhow::anyhow!("{e}\n\nUsage: {usage}"))?; + let source_vertex = args.source_vertex.ok_or_else(|| { + anyhow::anyhow!( + "HamiltonianPathBetweenTwoVertices requires --source-vertex\n\nUsage: {usage}" + ) + })?; + let target_vertex = args.target_vertex.ok_or_else(|| { + anyhow::anyhow!( + "HamiltonianPathBetweenTwoVertices requires --target-vertex\n\nUsage: {usage}" + ) + })?; + ensure_vertex_in_bounds(source_vertex, graph.num_vertices(), "source_vertex")?; + ensure_vertex_in_bounds(target_vertex, graph.num_vertices(), "target_vertex")?; + anyhow::ensure!( + source_vertex != target_vertex, + "source_vertex and target_vertex must be distinct" + ); + ( + ser(HamiltonianPathBetweenTwoVertices::new( + graph, + source_vertex, + target_vertex, + ))?, + resolved_variant.clone(), + ) + } + // LongestPath "LongestPath" => { let usage = "pred create LongestPath --graph 0-1,0-2,1-3,2-3,2-4,3-5,4-5,4-6,5-6,1-6 --edge-lengths 3,2,4,1,5,2,3,2,4,1 --source-vertex 0 --target-vertex 6"; @@ -6804,6 +6839,35 @@ fn create_random( (ser(HamiltonianPath::new(graph))?, variant) } + // HamiltonianPathBetweenTwoVertices (graph + source/target) + "HamiltonianPathBetweenTwoVertices" => { + let num_vertices = num_vertices.max(2); + let edge_prob = args.edge_prob.unwrap_or(0.5); + if !(0.0..=1.0).contains(&edge_prob) { + bail!("--edge-prob must be between 0.0 and 1.0"); + } + let graph = util::create_random_graph(num_vertices, edge_prob, args.seed); + let source_vertex = args.source_vertex.unwrap_or(0); + let target_vertex = args + .target_vertex + .unwrap_or_else(|| num_vertices.saturating_sub(1)); + ensure_vertex_in_bounds(source_vertex, graph.num_vertices(), "source_vertex")?; + ensure_vertex_in_bounds(target_vertex, graph.num_vertices(), "target_vertex")?; + anyhow::ensure!( + source_vertex != target_vertex, + "source_vertex and target_vertex must be distinct" + ); + let variant = variant_map(&[("graph", "SimpleGraph")]); + ( + ser(HamiltonianPathBetweenTwoVertices::new( + graph, + source_vertex, + target_vertex, + ))?, + variant, + ) + } + // LongestCircuit (graph + unit edge lengths) "LongestCircuit" => { let edge_prob = args.edge_prob.unwrap_or(0.5); diff --git a/src/lib.rs b/src/lib.rs index 6c1af959..d8c41f14 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -53,10 +53,11 @@ pub mod prelude { AcyclicPartition, BalancedCompleteBipartiteSubgraph, BicliqueCover, BiconnectivityAugmentation, BottleneckTravelingSalesman, BoundedComponentSpanningForest, DirectedTwoCommodityIntegralFlow, DisjointConnectingPaths, GeneralizedHex, - HamiltonianCircuit, HamiltonianPath, IntegralFlowBundles, IntegralFlowHomologousArcs, - IntegralFlowWithMultipliers, IsomorphicSpanningTree, KClique, KthBestSpanningTree, - LengthBoundedDisjointPaths, LongestPath, MixedChinesePostman, SpinGlass, SteinerTree, - StrongConnectivityAugmentation, SubgraphIsomorphism, + HamiltonianCircuit, HamiltonianPath, HamiltonianPathBetweenTwoVertices, + IntegralFlowBundles, IntegralFlowHomologousArcs, IntegralFlowWithMultipliers, + IsomorphicSpanningTree, KClique, KthBestSpanningTree, LengthBoundedDisjointPaths, + LongestPath, MixedChinesePostman, SpinGlass, SteinerTree, StrongConnectivityAugmentation, + SubgraphIsomorphism, }; pub use crate::models::graph::{ KColoring, LongestCircuit, MaxCut, MaximalIS, MaximumClique, MaximumIndependentSet, diff --git a/src/models/graph/hamiltonian_path_between_two_vertices.rs b/src/models/graph/hamiltonian_path_between_two_vertices.rs new file mode 100644 index 00000000..08dfe840 --- /dev/null +++ b/src/models/graph/hamiltonian_path_between_two_vertices.rs @@ -0,0 +1,238 @@ +//! Hamiltonian Path Between Two Vertices problem implementation. +//! +//! The Hamiltonian Path Between Two Vertices problem asks whether a graph contains a +//! simple path that starts at a specified source vertex, ends at a specified target +//! vertex, and visits every other vertex exactly once. + +use crate::registry::{FieldInfo, ProblemSchemaEntry, VariantDimension}; +use crate::topology::{Graph, SimpleGraph}; +use crate::traits::Problem; +use crate::variant::VariantParam; +use serde::{Deserialize, Serialize}; + +inventory::submit! { + ProblemSchemaEntry { + name: "HamiltonianPathBetweenTwoVertices", + display_name: "Hamiltonian Path Between Two Vertices", + aliases: &[], + dimensions: &[ + VariantDimension::new("graph", "SimpleGraph", &["SimpleGraph"]), + ], + module_path: module_path!(), + description: "Find a Hamiltonian path between two specified vertices in a graph", + fields: &[ + FieldInfo { name: "graph", type_name: "G", description: "The underlying graph G=(V,E)" }, + FieldInfo { name: "source_vertex", type_name: "usize", description: "Source vertex s" }, + FieldInfo { name: "target_vertex", type_name: "usize", description: "Target vertex t" }, + ], + } +} + +/// The Hamiltonian Path Between Two Vertices problem. +/// +/// Given a graph G = (V, E) and two distinguished vertices s, t in V, +/// determine whether G contains a Hamiltonian path from s to t, i.e., +/// a simple path that begins at s, ends at t, and visits every vertex +/// exactly once. +/// +/// # Representation +/// +/// A configuration is a sequence of `n` vertex indices representing a vertex +/// ordering (permutation). Each position `i` in the configuration holds the +/// vertex visited at step `i`. A valid solution must be a permutation of +/// `0..n` where: +/// - The first element equals `source_vertex` +/// - The last element equals `target_vertex` +/// - Consecutive entries are adjacent in the graph +/// +/// The search space has `dims() = [n; n]` (each position can hold any of `n` +/// vertices), so brute-force enumerates `n^n` configurations. +/// +/// # Type Parameters +/// +/// * `G` - Graph type (e.g., SimpleGraph) +/// +/// # Example +/// +/// ``` +/// use problemreductions::models::graph::HamiltonianPathBetweenTwoVertices; +/// use problemreductions::topology::SimpleGraph; +/// use problemreductions::{Problem, Solver, BruteForce}; +/// +/// // Path graph: 0-1-2-3, source=0, target=3 +/// let graph = SimpleGraph::new(4, vec![(0, 1), (1, 2), (2, 3)]); +/// let problem = HamiltonianPathBetweenTwoVertices::new(graph, 0, 3); +/// +/// let solver = BruteForce::new(); +/// let solution = solver.find_witness(&problem); +/// assert!(solution.is_some()); +/// ``` +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(bound(deserialize = "G: serde::Deserialize<'de>"))] +pub struct HamiltonianPathBetweenTwoVertices { + graph: G, + source_vertex: usize, + target_vertex: usize, +} + +impl HamiltonianPathBetweenTwoVertices { + /// Create a new Hamiltonian Path Between Two Vertices problem. + /// + /// # Panics + /// + /// Panics if `source_vertex` or `target_vertex` is out of range, or if they are equal. + pub fn new(graph: G, source_vertex: usize, target_vertex: usize) -> Self { + let n = graph.num_vertices(); + assert!( + source_vertex < n, + "source_vertex {source_vertex} out of range for graph with {n} vertices" + ); + assert!( + target_vertex < n, + "target_vertex {target_vertex} out of range for graph with {n} vertices" + ); + assert_ne!( + source_vertex, target_vertex, + "source_vertex and target_vertex must be distinct" + ); + Self { + graph, + source_vertex, + target_vertex, + } + } + + /// Get a reference to the underlying graph. + pub fn graph(&self) -> &G { + &self.graph + } + + /// Get the source vertex s. + pub fn source_vertex(&self) -> usize { + self.source_vertex + } + + /// Get the target vertex t. + pub fn target_vertex(&self) -> usize { + self.target_vertex + } + + /// Get the number of vertices in the underlying graph. + pub fn num_vertices(&self) -> usize { + self.graph.num_vertices() + } + + /// Get the number of edges in the underlying graph. + pub fn num_edges(&self) -> usize { + self.graph.num_edges() + } + + /// Check if a configuration is a valid Hamiltonian s-t path. + pub fn is_valid_solution(&self, config: &[usize]) -> bool { + is_valid_hamiltonian_st_path(&self.graph, config, self.source_vertex, self.target_vertex) + } +} + +impl Problem for HamiltonianPathBetweenTwoVertices +where + G: Graph + VariantParam, +{ + const NAME: &'static str = "HamiltonianPathBetweenTwoVertices"; + type Value = crate::types::Or; + + fn variant() -> Vec<(&'static str, &'static str)> { + crate::variant_params![G] + } + + fn dims(&self) -> Vec { + let n = self.graph.num_vertices(); + vec![n; n] + } + + fn evaluate(&self, config: &[usize]) -> crate::types::Or { + crate::types::Or(is_valid_hamiltonian_st_path( + &self.graph, + config, + self.source_vertex, + self.target_vertex, + )) + } +} + +/// Check if a configuration represents a valid Hamiltonian s-t path in the graph. +/// +/// A valid Hamiltonian s-t path is a permutation of all vertices such that: +/// - The first element is `source` +/// - The last element is `target` +/// - Consecutive vertices in the permutation are adjacent in the graph +pub(crate) fn is_valid_hamiltonian_st_path( + graph: &G, + config: &[usize], + source: usize, + target: usize, +) -> bool { + let n = graph.num_vertices(); + if config.len() != n { + return false; + } + + // Check that config is a valid permutation of 0..n + let mut seen = vec![false; n]; + for &v in config { + if v >= n || seen[v] { + return false; + } + seen[v] = true; + } + + // Check endpoint constraints + if config[0] != source || config[n - 1] != target { + return false; + } + + // Check consecutive vertices are adjacent + for i in 0..n.saturating_sub(1) { + if !graph.has_edge(config[i], config[i + 1]) { + return false; + } + } + + true +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_model_example_specs() -> Vec { + // Instance from issue #831: 6 vertices, s=0, t=5 + // Hamiltonian s-t path: [0, 3, 2, 1, 4, 5] + vec![crate::example_db::specs::ModelExampleSpec { + id: "hamiltonian_path_between_two_vertices_simplegraph", + instance: Box::new(HamiltonianPathBetweenTwoVertices::new( + SimpleGraph::new( + 6, + vec![ + (0, 1), + (0, 3), + (1, 2), + (1, 4), + (2, 5), + (3, 4), + (4, 5), + (2, 3), + ], + ), + 0, + 5, + )), + optimal_config: vec![0, 3, 2, 1, 4, 5], + optimal_value: serde_json::json!(true), + }] +} + +// Use Bjorklund (2014) O*(1.657^n) as best known for general undirected graphs +crate::declare_variants! { + default HamiltonianPathBetweenTwoVertices => "1.657^num_vertices", +} + +#[cfg(test)] +#[path = "../../unit_tests/models/graph/hamiltonian_path_between_two_vertices.rs"] +mod tests; diff --git a/src/models/graph/mod.rs b/src/models/graph/mod.rs index 54ed94ca..aeed095d 100644 --- a/src/models/graph/mod.rs +++ b/src/models/graph/mod.rs @@ -23,6 +23,7 @@ //! - [`SpinGlass`]: Ising model Hamiltonian //! - [`MinimumMultiwayCut`]: Minimum weight multiway cut //! - [`HamiltonianPath`]: Hamiltonian path (simple path visiting every vertex) +//! - [`HamiltonianPathBetweenTwoVertices`]: Hamiltonian path between two specified vertices (decision problem) //! - [`LongestPath`]: Maximum-length simple s-t path //! - [`ShortestWeightConstrainedPath`]: Bicriteria simple s-t path with length and weight bounds //! - [`PartitionIntoPathsOfLength2`]: Partition vertices into triples with at least two edges each @@ -67,6 +68,7 @@ pub(crate) mod disjoint_connecting_paths; pub(crate) mod generalized_hex; pub(crate) mod hamiltonian_circuit; pub(crate) mod hamiltonian_path; +pub(crate) mod hamiltonian_path_between_two_vertices; pub(crate) mod integral_flow_bundles; pub(crate) mod integral_flow_homologous_arcs; pub(crate) mod integral_flow_with_multipliers; @@ -123,6 +125,7 @@ pub use disjoint_connecting_paths::DisjointConnectingPaths; pub use generalized_hex::GeneralizedHex; pub use hamiltonian_circuit::HamiltonianCircuit; pub use hamiltonian_path::HamiltonianPath; +pub use hamiltonian_path_between_two_vertices::HamiltonianPathBetweenTwoVertices; pub use integral_flow_bundles::IntegralFlowBundles; pub use integral_flow_homologous_arcs::IntegralFlowHomologousArcs; pub use integral_flow_with_multipliers::IntegralFlowWithMultipliers; @@ -178,6 +181,7 @@ pub(crate) fn canonical_model_example_specs() -> Vec1->2->3 + assert!(problem.evaluate(&[0, 1, 2, 3])); + // Reversed path fails (wrong source/target) + assert!(!problem.evaluate(&[3, 2, 1, 0])); + // Invalid: wrong start vertex + assert!(!problem.evaluate(&[1, 0, 2, 3])); + // Invalid: wrong end vertex + assert!(!problem.evaluate(&[0, 1, 3, 2])); + // Invalid: not a permutation + assert!(!problem.evaluate(&[0, 1, 1, 3])); +} + +#[test] +fn test_hamiltonian_path_between_two_vertices_no_solution() { + // C5 cycle: s=0, t=2 has no Hamiltonian s-t path (from issue #831) + let problem = HamiltonianPathBetweenTwoVertices::new( + SimpleGraph::new(5, vec![(0, 1), (1, 2), (2, 3), (3, 4), (4, 0)]), + 0, + 2, + ); + let solver = BruteForce::new(); + let solution = solver.find_witness(&problem); + assert!( + solution.is_none(), + "C5 with s=0, t=2 has no Hamiltonian s-t path" + ); +} + +#[test] +fn test_hamiltonian_path_between_two_vertices_brute_force() { + use crate::traits::Problem; + + // Issue #831 Example 1: 6 vertices, 8 edges, s=0, t=5 + let problem = HamiltonianPathBetweenTwoVertices::new( + SimpleGraph::new( + 6, + vec![ + (0, 1), + (0, 3), + (1, 2), + (1, 4), + (2, 5), + (3, 4), + (4, 5), + (2, 3), + ], + ), + 0, + 5, + ); + + let solver = BruteForce::new(); + let solution = solver.find_witness(&problem); + assert!(solution.is_some()); + let sol = solution.unwrap(); + assert!(problem.evaluate(&sol)); + assert_eq!(sol[0], 0, "Path must start at source vertex 0"); + assert_eq!(sol[5], 5, "Path must end at target vertex 5"); + + // Issue says there are exactly 4 distinct Hamiltonian s-t paths + let all = solver.find_all_witnesses(&problem); + assert_eq!(all.len(), 4); + for path in &all { + assert!(problem.evaluate(path)); + assert_eq!(path[0], 0); + assert_eq!(path[5], 5); + } +} + +#[test] +fn test_hamiltonian_path_between_two_vertices_is_valid_solution() { + let problem = + HamiltonianPathBetweenTwoVertices::new(SimpleGraph::new(3, vec![(0, 1), (1, 2)]), 0, 2); + assert!(problem.is_valid_solution(&[0, 1, 2])); + assert!(!problem.is_valid_solution(&[2, 1, 0])); // wrong direction + assert!(!problem.is_valid_solution(&[0, 2, 1])); // no edge 0-2 +} + +#[test] +fn test_hamiltonian_path_between_two_vertices_is_valid_st_path_function() { + let graph = SimpleGraph::new(4, vec![(0, 1), (1, 2), (2, 3)]); + // Valid: 0->1->2->3 with source=0, target=3 + assert!(is_valid_hamiltonian_st_path(&graph, &[0, 1, 2, 3], 0, 3)); + // Invalid: reversed (source=3 but we pass source=0) + assert!(!is_valid_hamiltonian_st_path(&graph, &[3, 2, 1, 0], 0, 3)); + // Invalid: edge 0->2 doesn't exist + assert!(!is_valid_hamiltonian_st_path(&graph, &[0, 2, 1, 3], 0, 3)); + // Invalid: wrong length + assert!(!is_valid_hamiltonian_st_path(&graph, &[0, 1, 3], 0, 3)); + // Invalid: vertex out of range + assert!(!is_valid_hamiltonian_st_path(&graph, &[0, 1, 2, 4], 0, 3)); +} + +#[test] +fn test_hamiltonian_path_between_two_vertices_serialization() { + let problem = + HamiltonianPathBetweenTwoVertices::new(SimpleGraph::new(3, vec![(0, 1), (1, 2)]), 0, 2); + let json = serde_json::to_value(&problem).unwrap(); + let deserialized: HamiltonianPathBetweenTwoVertices = + serde_json::from_value(json).unwrap(); + assert_eq!(deserialized.num_vertices(), 3); + assert_eq!(deserialized.num_edges(), 2); + assert_eq!(deserialized.source_vertex(), 0); + assert_eq!(deserialized.target_vertex(), 2); +} + +#[test] +fn test_hamiltonian_path_between_two_vertices_paper_example() { + use crate::traits::Problem; + + // Instance from issue #831: 6 vertices, s=0, t=5 + let problem = HamiltonianPathBetweenTwoVertices::new( + SimpleGraph::new( + 6, + vec![ + (0, 1), + (0, 3), + (1, 2), + (1, 4), + (2, 5), + (3, 4), + (4, 5), + (2, 3), + ], + ), + 0, + 5, + ); + + // Issue-specified solution: 0 -> 3 -> 2 -> 1 -> 4 -> 5 + assert!(problem.evaluate(&[0, 3, 2, 1, 4, 5])); + + // Verify edge-by-edge + let path = [0usize, 3, 2, 1, 4, 5]; + assert_eq!(path[0], 0, "path starts at s=0"); + assert_eq!(path[5], 5, "path ends at t=5"); + + // Verify brute force confirms the problem + let solver = BruteForce::new(); + let all = solver.find_all_witnesses(&problem); + assert_eq!(all.len(), 4, "issue says 4 Hamiltonian s-t paths exist"); + assert!(all.contains(&vec![0, 3, 2, 1, 4, 5])); +} From 399b77bae08791b42934d276aab7e6300145dc99 Mon Sep 17 00:00:00 2001 From: Xiwei Pan Date: Mon, 30 Mar 2026 10:18:43 +0800 Subject: [PATCH 13/21] feat: add MinimumMaximalMatching model (#832) Implement minimum maximal matching / minimum edge dominating set (GJ GT10) with ILP reduction, CLI support, canonical example, and paper entry. Co-Authored-By: Claude Opus 4.6 (1M context) --- docs/paper/reductions.typ | 57 ++++++ docs/paper/references.bib | 11 ++ problemreductions-cli/src/commands/create.rs | 31 +++- src/models/graph/minimum_maximal_matching.rs | 169 ++++++++++++++++++ src/models/graph/mod.rs | 4 + src/models/mod.rs | 12 +- src/rules/minimummaximalmatching_ilp.rs | 115 ++++++++++++ src/rules/mod.rs | 3 + .../models/graph/minimum_maximal_matching.rs | 110 ++++++++++++ .../rules/minimummaximalmatching_ilp.rs | 99 ++++++++++ 10 files changed, 602 insertions(+), 9 deletions(-) create mode 100644 src/models/graph/minimum_maximal_matching.rs create mode 100644 src/rules/minimummaximalmatching_ilp.rs create mode 100644 src/unit_tests/models/graph/minimum_maximal_matching.rs create mode 100644 src/unit_tests/rules/minimummaximalmatching_ilp.rs diff --git a/docs/paper/reductions.typ b/docs/paper/reductions.typ index 4ccf8a79..91a0551b 100644 --- a/docs/paper/reductions.typ +++ b/docs/paper/reductions.typ @@ -101,6 +101,7 @@ "KClique": [$k$-Clique], "MinimumDominatingSet": [Minimum Dominating Set], "MaximumMatching": [Maximum Matching], + "MinimumMaximalMatching": [Minimum Maximal Matching], "BottleneckTravelingSalesman": [Bottleneck Traveling Salesman], "TravelingSalesman": [Traveling Salesman], "MaximumClique": [Maximum Clique], @@ -2200,6 +2201,44 @@ is feasible: each set induces a connected subgraph, the component weights are $2 ] } +#{ + let x = load-model-example("MinimumMaximalMatching") + let nv = graph-num-vertices(x.instance) + let ne = graph-num-edges(x.instance) + let edges = x.instance.graph.edges + let sol = (config: x.optimal_config, metric: x.optimal_value) + let matched-edges = sol.config.enumerate().filter(((i, v)) => v == 1).map(((i, _)) => edges.at(i)) + let sz = metric-value(sol.metric) + [ + #problem-def("MinimumMaximalMatching")[ + Given $G = (V, E)$, find $M subset.eq E$ of minimum cardinality such that $M$ is a matching and $M$ is maximal: every $e in E backslash M$ shares an endpoint with some $e' in M$. + ][ + A maximal matching cannot be extended by any edge, so every non-selected edge must be "blocked" by a selected one. Among all such matchings, the problem seeks one of minimum size. Unlike Maximum Matching (solvable in $O(n^3)$ by Edmonds' algorithm @edmonds1965), Minimum Maximal Matching is NP-hard @yannakakis1980; it can also be viewed as a Minimum Dominating Set in the line graph. + + *Example.* Consider the path graph $P_#nv$ with $n = #nv$ vertices and $#ne$ edges. A minimum maximal matching is $M = {#matched-edges.map(((u, v)) => $(v_#u, v_#v)$).join(", ")}$ with $|M| = #sz$. Every unselected edge shares an endpoint with a selected one, so $M$ is maximal. + + #pred-commands( + "pred create --example MinimumMaximalMatching -o mmm.json", + "pred solve mmm.json", + "pred evaluate mmm.json --config " + x.optimal_config.map(str).join(","), + ) + + #figure({ + let vpos = range(nv).map(i => (i.bit-and(0xffff) * 1.5, 0)) + draw-edge-highlight(vpos, edges, matched-edges, + matched-edges.fold((), (acc, (u, v)) => { + let r = acc + if u not in r { r.push(u) } + if v not in r { r.push(v) } + r + })) + }, + caption: [Path $P_#nv$ with minimum maximal matching $M = {#matched-edges.map(((u, v)) => $(v_#u, v_#v)$).join(", ")}$ (blue, $|M| = #sz$).], + ) + ] + ] +} + #{ let x = load-model-example("MinimumDummyActivitiesPert") let nv = x.instance.graph.num_vertices @@ -9367,6 +9406,23 @@ The following reductions to Integer Linear Programming are straightforward formu _Solution extraction._ $I = {v : x_v = 1}$. ] +#reduction-rule("MinimumMaximalMatching", "ILP")[ + Each edge is either selected or not; matching and maximality constraints are both directly linear in binary edge indicators. +][ + _Construction._ Variables: $x_e in {0, 1}$ for each $e in E$. The ILP is: + $ + min quad & sum_e x_e \ + "subject to" quad & sum_(e in.rev v) x_e <= 1 quad forall v in V \ + & x_j + sum_(i : i ~ j,\ i eq.not j) x_i >= 1 quad forall j in E \ + & x_e in {0, 1} quad forall e in E + $, + where $i ~ j$ denotes that edges $i$ and $j$ share an endpoint. + + _Correctness._ Degree constraints enforce the matching property. For each edge $j$, the maximality constraint requires that $j$ itself or at least one adjacent edge is selected, ensuring the matching cannot be extended. ($arrow.r.double$) A minimum maximal matching satisfies both constraints and minimizes cardinality. ($arrow.l.double$) Any feasible solution is a maximal matching; the objective minimizes its size. + + _Solution extraction._ $M = {e : x_e = 1}$. +] + #reduction-rule("PartiallyOrderedKnapsack", "ILP")[ Standard knapsack with precedence constraints: item $b$ can only be selected if item $a$ is also selected for each precedence $(a, b)$. ][ @@ -11015,6 +11071,7 @@ The following table shows concrete variable overhead for example instances, take (source: "Factoring", target: "ILP"), (source: "MinimumSetCovering", target: "ILP"), (source: "MinimumDominatingSet", target: "ILP"), + (source: "MinimumMaximalMatching", target: "ILP"), (source: "MaximumClique", target: "ILP"), (source: "TravelingSalesman", target: "ILP"), ) diff --git a/docs/paper/references.bib b/docs/paper/references.bib index b82bb5b3..bc9e682f 100644 --- a/docs/paper/references.bib +++ b/docs/paper/references.bib @@ -1569,3 +1569,14 @@ @article{gonzalez1976 year = {1976}, doi = {10.1145/321978.321985} } + +@article{yannakakis1980, + author = {Mihalis Yannakakis and Fanica Gavril}, + title = {Edge Dominating Sets in Graphs}, + journal = {SIAM Journal on Applied Mathematics}, + volume = {38}, + number = {3}, + pages = {364--372}, + year = {1980}, + doi = {10.1137/0138030} +} diff --git a/problemreductions-cli/src/commands/create.rs b/problemreductions-cli/src/commands/create.rs index f4ca81ef..2f70d2a5 100644 --- a/problemreductions-cli/src/commands/create.rs +++ b/problemreductions-cli/src/commands/create.rs @@ -17,9 +17,9 @@ use problemreductions::models::graph::{ DirectedHamiltonianPath, DisjointConnectingPaths, GeneralizedHex, HamiltonianCircuit, HamiltonianPath, HamiltonianPathBetweenTwoVertices, IntegralFlowBundles, LengthBoundedDisjointPaths, LongestCircuit, LongestPath, MinimumCutIntoBoundedSets, - MinimumDummyActivitiesPert, MinimumMultiwayCut, MixedChinesePostman, MultipleChoiceBranching, - PathConstrainedNetworkFlow, RootedTreeArrangement, SteinerTree, SteinerTreeInGraphs, - StrongConnectivityAugmentation, + MinimumDummyActivitiesPert, MinimumMaximalMatching, MinimumMultiwayCut, MixedChinesePostman, + MultipleChoiceBranching, PathConstrainedNetworkFlow, RootedTreeArrangement, SteinerTree, + SteinerTreeInGraphs, StrongConnectivityAugmentation, }; use problemreductions::models::misc::{ AdditionalKey, BinPacking, BoyceCoddNormalFormViolation, CapacityAssignment, CbqRelation, @@ -682,6 +682,7 @@ fn example_for(canonical: &str, graph_type: Option<&str>) -> &'static str { "BalancedCompleteBipartiteSubgraph" => { "--left 4 --right 4 --biedges 0-0,0-1,0-2,1-0,1-1,1-2,2-0,2-1,2-2,3-0,3-1,3-3 --k 3" } + "MinimumMaximalMatching" => "--graph 0-1,1-2,2-3,3-4,4-5", "PartitionIntoTriangles" => "--graph 0-1,1-2,0-2", "Factoring" => "--target 15 --m 4 --n 4", "CapacityAssignment" => { @@ -1402,6 +1403,19 @@ pub fn create(args: &CreateArgs, out: &OutputConfig) -> Result<()> { ) } + // MinimumMaximalMatching (graph only, no weights) + "MinimumMaximalMatching" => { + let (graph, _) = parse_graph(args).map_err(|e| { + anyhow::anyhow!( + "{e}\n\nUsage: pred create MinimumMaximalMatching --graph 0-1,1-2,2-3,3-4,4-5" + ) + })?; + ( + ser(MinimumMaximalMatching::new(graph))?, + variant_map(&[("graph", "SimpleGraph")]), + ) + } + // Hamiltonian Circuit (graph only, no weights) "HamiltonianCircuit" => { let (graph, _) = parse_graph(args).map_err(|e| { @@ -6817,6 +6831,17 @@ fn create_random( ) } + // MinimumMaximalMatching (graph only, no weights) + "MinimumMaximalMatching" => { + let edge_prob = args.edge_prob.unwrap_or(0.5); + if !(0.0..=1.0).contains(&edge_prob) { + bail!("--edge-prob must be between 0.0 and 1.0"); + } + let graph = util::create_random_graph(num_vertices, edge_prob, args.seed); + let variant = variant_map(&[("graph", "SimpleGraph")]); + (ser(MinimumMaximalMatching::new(graph))?, variant) + } + // Hamiltonian Circuit (graph only, no weights) "HamiltonianCircuit" => { let edge_prob = args.edge_prob.unwrap_or(0.5); diff --git a/src/models/graph/minimum_maximal_matching.rs b/src/models/graph/minimum_maximal_matching.rs new file mode 100644 index 00000000..c77aa1b8 --- /dev/null +++ b/src/models/graph/minimum_maximal_matching.rs @@ -0,0 +1,169 @@ +//! MinimumMaximalMatching problem implementation. +//! +//! The Minimum Maximal Matching problem asks for a matching of minimum size +//! that is maximal (cannot be extended by adding any edge). + +use crate::registry::{FieldInfo, ProblemSchemaEntry, VariantDimension}; +use crate::topology::{Graph, SimpleGraph}; +use crate::traits::Problem; +use crate::types::Min; +use serde::{Deserialize, Serialize}; + +inventory::submit! { + ProblemSchemaEntry { + name: "MinimumMaximalMatching", + display_name: "Minimum Maximal Matching", + aliases: &[], + dimensions: &[ + VariantDimension::new("graph", "SimpleGraph", &["SimpleGraph"]), + ], + module_path: module_path!(), + description: "Find a minimum-size matching that cannot be extended", + fields: &[ + FieldInfo { name: "graph", type_name: "G", description: "The underlying graph G=(V,E)" }, + ], + } +} + +/// The Minimum Maximal Matching problem. +/// +/// Given a graph G = (V, E), find a matching M ⊆ E of minimum cardinality +/// such that M is maximal: every edge not in M shares an endpoint with some +/// edge in M (i.e., M cannot be extended by adding any further edge). +/// +/// # Type Parameters +/// +/// * `G` - The graph type (e.g., `SimpleGraph`) +/// +/// # Example +/// +/// ``` +/// use problemreductions::models::graph::MinimumMaximalMatching; +/// use problemreductions::topology::SimpleGraph; +/// use problemreductions::{Problem, Solver, BruteForce}; +/// +/// // Path graph P4: 0-1-2-3 +/// let graph = SimpleGraph::new(4, vec![(0, 1), (1, 2), (2, 3)]); +/// let problem = MinimumMaximalMatching::new(graph); +/// +/// let solver = BruteForce::new(); +/// let solution = solver.find_witness(&problem).unwrap(); +/// +/// // Minimum maximal matching has 1 edge (e.g., edge (1,2)) +/// let count: usize = solution.iter().sum(); +/// assert_eq!(count, 1); +/// ``` +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct MinimumMaximalMatching { + /// The underlying graph. + graph: G, +} + +impl MinimumMaximalMatching { + /// Create a MinimumMaximalMatching problem from a graph. + pub fn new(graph: G) -> Self { + Self { graph } + } + + /// Get a reference to the underlying graph. + pub fn graph(&self) -> &G { + &self.graph + } + + /// Get the number of vertices in the underlying graph. + pub fn num_vertices(&self) -> usize { + self.graph.num_vertices() + } + + /// Get the number of edges in the underlying graph. + pub fn num_edges(&self) -> usize { + self.graph.num_edges() + } + + /// Check whether a configuration is a valid maximal matching. + /// + /// Returns `true` iff: + /// 1. The selected edges form a matching (no two share an endpoint). + /// 2. The matching is maximal (every non-selected edge shares an endpoint + /// with some selected edge). + pub fn is_valid_maximal_matching(&self, config: &[usize]) -> bool { + let edges = self.graph.edges(); + let n = self.graph.num_vertices(); + + // Step 1: Check matching property. + let mut vertex_used = vec![false; n]; + for (idx, &sel) in config.iter().enumerate() { + if sel == 1 { + let (u, v) = edges[idx]; + if vertex_used[u] || vertex_used[v] { + return false; + } + vertex_used[u] = true; + vertex_used[v] = true; + } + } + + // Step 2: Check maximality — every unselected edge must be blocked. + for (idx, &sel) in config.iter().enumerate() { + if sel == 0 { + let (u, v) = edges[idx]; + // Edge (u,v) is blocked iff u or v is already matched. + if !vertex_used[u] && !vertex_used[v] { + return false; + } + } + } + + true + } +} + +impl Problem for MinimumMaximalMatching +where + G: Graph + crate::variant::VariantParam, +{ + const NAME: &'static str = "MinimumMaximalMatching"; + type Value = Min; + + fn variant() -> Vec<(&'static str, &'static str)> { + crate::variant_params![G] + } + + fn dims(&self) -> Vec { + vec![2; self.graph.num_edges()] + } + + fn evaluate(&self, config: &[usize]) -> Min { + if config.len() != self.graph.num_edges() { + return Min(None); + } + if !self.is_valid_maximal_matching(config) { + return Min(None); + } + let count = config.iter().filter(|&&x| x == 1).count(); + Min(Some(count)) + } +} + +crate::declare_variants! { + default MinimumMaximalMatching => "1.3160^num_vertices", +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_model_example_specs() -> Vec { + // Path graph P6: 6 vertices, edges [(0,1),(1,2),(2,3),(3,4),(4,5)] + // config [0,1,0,1,0] = edges {(1,2),(3,4)} — a maximal matching of size 2. + vec![crate::example_db::specs::ModelExampleSpec { + id: "minimum_maximal_matching_simplegraph", + instance: Box::new(MinimumMaximalMatching::new(SimpleGraph::new( + 6, + vec![(0, 1), (1, 2), (2, 3), (3, 4), (4, 5)], + ))), + optimal_config: vec![0, 1, 0, 1, 0], + optimal_value: serde_json::json!(2), + }] +} + +#[cfg(test)] +#[path = "../../unit_tests/models/graph/minimum_maximal_matching.rs"] +mod tests; diff --git a/src/models/graph/mod.rs b/src/models/graph/mod.rs index aeed095d..fa7180e2 100644 --- a/src/models/graph/mod.rs +++ b/src/models/graph/mod.rs @@ -19,6 +19,7 @@ //! - [`KColoring`]: K-vertex coloring //! - [`PartitionIntoTriangles`]: Partition vertices into triangles //! - [`MaximumMatching`]: Maximum weight matching +//! - [`MinimumMaximalMatching`]: Minimum-size maximal matching //! - [`TravelingSalesman`]: Traveling Salesman (minimum weight Hamiltonian cycle) //! - [`SpinGlass`]: Ising model Hamiltonian //! - [`MinimumMultiwayCut`]: Minimum weight multiway cut @@ -90,6 +91,7 @@ pub(crate) mod minimum_dominating_set; pub(crate) mod minimum_dummy_activities_pert; pub(crate) mod minimum_feedback_arc_set; pub(crate) mod minimum_feedback_vertex_set; +pub(crate) mod minimum_maximal_matching; pub(crate) mod minimum_multiway_cut; pub(crate) mod minimum_sum_multicenter; pub(crate) mod minimum_vertex_cover; @@ -147,6 +149,7 @@ pub use minimum_dominating_set::MinimumDominatingSet; pub use minimum_dummy_activities_pert::MinimumDummyActivitiesPert; pub use minimum_feedback_arc_set::MinimumFeedbackArcSet; pub use minimum_feedback_vertex_set::MinimumFeedbackVertexSet; +pub use minimum_maximal_matching::MinimumMaximalMatching; pub use minimum_multiway_cut::MinimumMultiwayCut; pub use minimum_sum_multicenter::MinimumSumMulticenter; pub use minimum_vertex_cover::MinimumVertexCover; @@ -193,6 +196,7 @@ pub(crate) fn canonical_model_example_specs() -> Vec= 1 +//! (if edge j is not selected, at least one edge adjacent to it must be) +//! - Objective: Minimize sum e_i + +use crate::models::algebraic::{LinearConstraint, ObjectiveSense, ILP}; +use crate::models::graph::MinimumMaximalMatching; +use crate::reduction; +use crate::rules::traits::{ReduceTo, ReductionResult}; +use crate::topology::{Graph, SimpleGraph}; + +/// Result of reducing MinimumMaximalMatching to ILP. +/// +/// This reduction creates a binary ILP where: +/// - Each edge corresponds to a binary variable +/// - Vertex constraints ensure at most one incident edge is selected per vertex +/// - Edge constraints ensure that each edge is either selected or blocked by an adjacent +/// selected edge (maximality) +/// - The objective minimizes the total number of selected edges +#[derive(Debug, Clone)] +pub struct ReductionMMMToILP { + target: ILP, +} + +impl ReductionResult for ReductionMMMToILP { + type Source = MinimumMaximalMatching; + type Target = ILP; + + fn target_problem(&self) -> &ILP { + &self.target + } + + /// Extract solution from ILP back to MinimumMaximalMatching. + /// + /// Since the mapping is 1:1 (each edge maps to one binary variable), + /// the solution extraction is simply copying the configuration. + fn extract_solution(&self, target_solution: &[usize]) -> Vec { + target_solution.to_vec() + } +} + +#[reduction( + overhead = { + num_vars = "num_edges", + num_constraints = "num_vertices + num_edges", + } +)] +impl ReduceTo> for MinimumMaximalMatching { + type Result = ReductionMMMToILP; + + fn reduce_to(&self) -> Self::Result { + let edges = self.graph().edges(); + let num_vars = edges.len(); + let mut constraints = Vec::new(); + + // Matching constraints: for each vertex v, sum of incident edge variables <= 1. + // Build vertex -> incident edge index map. + let n = self.graph().num_vertices(); + let mut v2e: Vec> = vec![Vec::new(); n]; + for (idx, &(u, v)) in edges.iter().enumerate() { + v2e[u].push(idx); + v2e[v].push(idx); + } + for incident in &v2e { + if !incident.is_empty() { + let terms: Vec<(usize, f64)> = incident.iter().map(|&e| (e, 1.0)).collect(); + constraints.push(LinearConstraint::le(terms, 1.0)); + } + } + + // Maximality constraints: for each edge j, the closed neighborhood (j itself plus all + // edges sharing an endpoint with j) must contain at least one selected edge. + // i.e. e_j + sum_{i: i shares endpoint with j, i≠j} e_i >= 1 for all j. + for (j, &(uj, vj)) in edges.iter().enumerate() { + // Collect all edges in the closed neighborhood of edge j. + let mut neighbors: Vec = vec![j]; + for &i in v2e[uj].iter().chain(v2e[vj].iter()) { + if i != j && !neighbors.contains(&i) { + neighbors.push(i); + } + } + let terms: Vec<(usize, f64)> = neighbors.iter().map(|&i| (i, 1.0)).collect(); + constraints.push(LinearConstraint::ge(terms, 1.0)); + } + + // Objective: minimize sum e_i + let objective: Vec<(usize, f64)> = (0..num_vars).map(|i| (i, 1.0)).collect(); + + let target = ILP::new(num_vars, constraints, objective, ObjectiveSense::Minimize); + ReductionMMMToILP { target } + } +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_rule_example_specs() -> Vec { + vec![crate::example_db::specs::RuleExampleSpec { + id: "minimummaximalmatching_to_ilp", + build: || { + // Path graph P6 + let source = MinimumMaximalMatching::new(SimpleGraph::new( + 6, + vec![(0, 1), (1, 2), (2, 3), (3, 4), (4, 5)], + )); + crate::example_db::specs::rule_example_via_ilp::<_, bool>(source) + }, + }] +} + +#[cfg(test)] +#[path = "../unit_tests/rules/minimummaximalmatching_ilp.rs"] +mod tests; diff --git a/src/rules/mod.rs b/src/rules/mod.rs index 8a726061..e6a9e921 100644 --- a/src/rules/mod.rs +++ b/src/rules/mod.rs @@ -166,6 +166,8 @@ pub(crate) mod minimumhittingset_ilp; #[cfg(feature = "ilp-solver")] pub(crate) mod minimuminternalmacrodatacompression_ilp; #[cfg(feature = "ilp-solver")] +pub(crate) mod minimummaximalmatching_ilp; +#[cfg(feature = "ilp-solver")] pub(crate) mod minimummultiwaycut_ilp; #[cfg(feature = "ilp-solver")] pub(crate) mod minimumsetcovering_ilp; @@ -363,6 +365,7 @@ pub(crate) fn canonical_rule_example_specs() -> Vec = serde_json::from_str(&json).unwrap(); + assert_eq!(deserialized.num_vertices(), 4); + assert_eq!(deserialized.num_edges(), 3); +} diff --git a/src/unit_tests/rules/minimummaximalmatching_ilp.rs b/src/unit_tests/rules/minimummaximalmatching_ilp.rs new file mode 100644 index 00000000..278244d3 --- /dev/null +++ b/src/unit_tests/rules/minimummaximalmatching_ilp.rs @@ -0,0 +1,99 @@ +use super::*; +use crate::solvers::{BruteForce, ILPSolver}; +use crate::topology::SimpleGraph; +use crate::traits::Problem; +use crate::types::Min; + +#[test] +fn test_reduction_creates_valid_ilp() { + // Path P4: 4 vertices, 3 edges + let problem = MinimumMaximalMatching::new(SimpleGraph::new(4, vec![(0, 1), (1, 2), (2, 3)])); + let reduction: ReductionMMMToILP = ReduceTo::>::reduce_to(&problem); + let ilp = reduction.target_problem(); + + // num_vars = num_edges = 3 + assert_eq!(ilp.num_vars, 3, "Should have one variable per edge"); + // num_constraints = num_vertices (with degree >= 1) + num_edges + // Vertices 0,1,2,3 all have degree >= 1 → 4 matching constraints + 3 maximality constraints + assert_eq!(ilp.constraints.len(), 7); + assert_eq!(ilp.sense, ObjectiveSense::Minimize, "Should minimize"); +} + +#[test] +fn test_minimummaximalmatching_to_ilp_closed_loop() { + // Path P4: optimal minimum maximal matching = 1 edge (center edge (1,2)). + let problem = MinimumMaximalMatching::new(SimpleGraph::new(4, vec![(0, 1), (1, 2), (2, 3)])); + let reduction: ReductionMMMToILP = ReduceTo::>::reduce_to(&problem); + + let bf = BruteForce::new(); + let ilp_solver = ILPSolver::new(); + + let bf_solution = bf.find_witness(&problem).unwrap(); + let bf_value = problem.evaluate(&bf_solution); + + let ilp_solution = ilp_solver + .solve(reduction.target_problem()) + .expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + let ilp_value = problem.evaluate(&extracted); + + assert_eq!(bf_value, Min(Some(1))); + assert_eq!(ilp_value, Min(Some(1))); + assert!(problem.evaluate(&extracted).is_valid()); +} + +#[test] +fn test_minimummaximalmatching_to_ilp_path_p6() { + // Path P6: optimal = 2 edges. + let problem = MinimumMaximalMatching::new(SimpleGraph::new( + 6, + vec![(0, 1), (1, 2), (2, 3), (3, 4), (4, 5)], + )); + let reduction: ReductionMMMToILP = ReduceTo::>::reduce_to(&problem); + + let ilp_solver = ILPSolver::new(); + let ilp_solution = ilp_solver + .solve(reduction.target_problem()) + .expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + + assert_eq!(problem.evaluate(&extracted), Min(Some(2))); +} + +#[test] +fn test_minimummaximalmatching_to_ilp_triangle() { + // Triangle: optimal = 1 (any single edge is maximal). + let problem = MinimumMaximalMatching::new(SimpleGraph::new(3, vec![(0, 1), (1, 2), (0, 2)])); + let reduction: ReductionMMMToILP = ReduceTo::>::reduce_to(&problem); + + let ilp_solver = ILPSolver::new(); + let ilp_solution = ilp_solver + .solve(reduction.target_problem()) + .expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + + assert_eq!(problem.evaluate(&extracted), Min(Some(1))); + assert!(problem.evaluate(&extracted).is_valid()); +} + +#[test] +fn test_minimummaximalmatching_to_ilp_bf_vs_ilp() { + let problem = MinimumMaximalMatching::new(SimpleGraph::new( + 6, + vec![(0, 1), (1, 2), (2, 3), (3, 4), (4, 5)], + )); + let reduction: ReductionMMMToILP = ReduceTo::>::reduce_to(&problem); + crate::rules::test_helpers::assert_bf_vs_ilp(&problem, &reduction); +} + +#[test] +fn test_empty_graph() { + let problem = MinimumMaximalMatching::new(SimpleGraph::new(3, vec![])); + let reduction: ReductionMMMToILP = ReduceTo::>::reduce_to(&problem); + let ilp = reduction.target_problem(); + + assert_eq!(ilp.num_vars, 0); + assert_eq!(ilp.constraints.len(), 0); + assert!(problem.evaluate(&[]).is_valid()); + assert_eq!(problem.evaluate(&[]), Min(Some(0))); +} From 4b7d61b8608cb62b185ac7a2550782a5ef2bf5bd Mon Sep 17 00:00:00 2001 From: Xiwei Pan Date: Mon, 30 Mar 2026 10:56:14 +0800 Subject: [PATCH 14/21] feat: add PartitionIntoForests model (#833) Implement vertex arboricity / partition into forests feasibility problem (GJ GT14) with brute-force solver, CLI support, canonical example, and paper entry. Co-Authored-By: Claude Opus 4.6 (1M context) --- docs/paper/reductions.typ | 26 +++ problemreductions-cli/src/commands/create.rs | 28 +++ src/models/graph/mod.rs | 4 + src/models/graph/partition_into_forests.rs | 184 ++++++++++++++++++ src/models/mod.rs | 9 +- .../models/graph/partition_into_forests.rs | 117 +++++++++++ 6 files changed, 364 insertions(+), 4 deletions(-) create mode 100644 src/models/graph/partition_into_forests.rs create mode 100644 src/unit_tests/models/graph/partition_into_forests.rs diff --git a/docs/paper/reductions.typ b/docs/paper/reductions.typ index 91a0551b..12eb49cb 100644 --- a/docs/paper/reductions.typ +++ b/docs/paper/reductions.typ @@ -179,6 +179,7 @@ "ExpectedRetrievalCost": [Expected Retrieval Cost], "MultiprocessorScheduling": [Multiprocessor Scheduling], "ProductionPlanning": [Production Planning], + "PartitionIntoForests": [Partition into Forests], "PartitionIntoPathsOfLength2": [Partition into Paths of Length 2], "PartitionIntoTriangles": [Partition Into Triangles], "PrecedenceConstrainedScheduling": [Precedence Constrained Scheduling], @@ -4324,6 +4325,31 @@ A classical NP-complete problem from Garey and Johnson @garey1979[Ch.~3, p.~76], ] } +#{ + let x = load-model-example("PartitionIntoForests") + let nv = graph-num-vertices(x.instance) + let ne = graph-num-edges(x.instance) + let edges = x.instance.graph.edges + let K = x.instance.num_forests + let sol = (config: x.optimal_config, metric: x.optimal_value) + let classes = range(K).map(c => sol.config.enumerate().filter(((i, v)) => v == c).map(((i, _)) => i)) + [ + #problem-def("PartitionIntoForests")[ + Given an undirected graph $G = (V, E)$ and a positive integer $K$, determine whether the vertex set $V$ can be partitioned into $K$ non-empty classes $V_1, dots, V_K$ such that the subgraph $G[V_i]$ induced by each class $V_i$ is a forest (acyclic graph). + ][ + Partition Into Forests is NP-complete @garey1979[GT18]. The problem asks whether the vertex set can be split into $K$ classes each inducing an acyclic subgraph; it generalises arboricity decomposition (covering all edges with $K$ forests, solvable in polynomial time) to the decision problem where the partition need not cover all edges. The best known exact algorithm uses brute-force enumeration in $O^*(K^n)$ time. + + *Example.* Consider $G$ with $n = #nv$ vertices and edges #edges.map(((u, v)) => [${#u, #v}$]).join(", "). With $K = #K$, the partition #classes.enumerate().map(((i, c)) => $V_#(i + 1) = {#c.map(v => $v_#v$).join(", ")}$).join(", ") is valid: each induced subgraph is acyclic. + + #pred-commands( + "pred create --example PartitionIntoForests -o partition-into-forests.json", + "pred solve partition-into-forests.json", + "pred evaluate partition-into-forests.json --config " + x.optimal_config.map(str).join(","), + ) + ] + ] +} + #{ let x = load-model-example("BinPacking") let sizes = x.instance.sizes diff --git a/problemreductions-cli/src/commands/create.rs b/problemreductions-cli/src/commands/create.rs index 2f70d2a5..a0ef3c22 100644 --- a/problemreductions-cli/src/commands/create.rs +++ b/problemreductions-cli/src/commands/create.rs @@ -684,6 +684,7 @@ fn example_for(canonical: &str, graph_type: Option<&str>) -> &'static str { } "MinimumMaximalMatching" => "--graph 0-1,1-2,2-3,3-4,4-5", "PartitionIntoTriangles" => "--graph 0-1,1-2,0-2", + "PartitionIntoForests" => "--graph 0-1,1-2,2-0,3-4,4-5,5-3 --k 2", "Factoring" => "--target 15 --m 4 --n 4", "CapacityAssignment" => { "--capacities 1,2,3 --cost-matrix \"1,3,6;2,4,7;1,2,5\" --delay-matrix \"8,4,1;7,3,1;6,3,1\" --delay-budget 12" @@ -4827,6 +4828,33 @@ pub fn create(args: &CreateArgs, out: &OutputConfig) -> Result<()> { ) } + // PartitionIntoForests + "PartitionIntoForests" => { + let (graph, _) = parse_graph(args).map_err(|e| { + anyhow::anyhow!( + "{e}\n\nUsage: pred create PartitionIntoForests --graph 0-1,1-2,2-0,3-4,4-5,5-3 --k 2" + ) + })?; + let num_forests = args.k.ok_or_else(|| { + anyhow::anyhow!( + "PartitionIntoForests requires --k (number of forest classes)\n\n\ + Usage: pred create PartitionIntoForests --graph 0-1,1-2,2-0,3-4,4-5,5-3 --k 2" + ) + })?; + anyhow::ensure!( + num_forests >= 1, + "PartitionIntoForests requires --k >= 1, got {}", + num_forests + ); + ( + ser(problemreductions::models::graph::PartitionIntoForests::new( + graph, + num_forests, + ))?, + resolved_variant.clone(), + ) + } + // ShortestCommonSupersequence "ShortestCommonSupersequence" => { let usage = "Usage: pred create SCS --strings \"0,1,2;1,2,0\""; diff --git a/src/models/graph/mod.rs b/src/models/graph/mod.rs index fa7180e2..1fbec621 100644 --- a/src/models/graph/mod.rs +++ b/src/models/graph/mod.rs @@ -27,6 +27,7 @@ //! - [`HamiltonianPathBetweenTwoVertices`]: Hamiltonian path between two specified vertices (decision problem) //! - [`LongestPath`]: Maximum-length simple s-t path //! - [`ShortestWeightConstrainedPath`]: Bicriteria simple s-t path with length and weight bounds +//! - [`PartitionIntoForests`]: Partition vertices into K classes each inducing an acyclic subgraph //! - [`PartitionIntoPathsOfLength2`]: Partition vertices into triples with at least two edges each //! - [`BicliqueCover`]: Biclique cover on bipartite graphs //! - [`SteinerTreeInGraphs`]: Minimum weight Steiner tree connecting terminal vertices @@ -100,6 +101,7 @@ pub(crate) mod multiple_choice_branching; pub(crate) mod multiple_copy_file_allocation; pub(crate) mod optimal_linear_arrangement; pub(crate) mod partial_feedback_edge_set; +pub(crate) mod partition_into_forests; pub(crate) mod partition_into_paths_of_length_2; pub(crate) mod partition_into_triangles; pub(crate) mod path_constrained_network_flow; @@ -158,6 +160,7 @@ pub use multiple_choice_branching::MultipleChoiceBranching; pub use multiple_copy_file_allocation::MultipleCopyFileAllocation; pub use optimal_linear_arrangement::OptimalLinearArrangement; pub use partial_feedback_edge_set::PartialFeedbackEdgeSet; +pub use partition_into_forests::PartitionIntoForests; pub use partition_into_paths_of_length_2::PartitionIntoPathsOfLength2; pub use partition_into_triangles::PartitionIntoTriangles; pub use path_constrained_network_flow::PathConstrainedNetworkFlow; @@ -216,6 +219,7 @@ pub(crate) fn canonical_model_example_specs() -> Vec= 1)" }, + ], + } +} + +/// The Partition Into Forests problem. +/// +/// Given a graph G = (V, E) and a positive integer K, determine whether the +/// vertices can be partitioned into K classes V_1, ..., V_K such that the +/// subgraph induced by each V_i is a forest (contains no cycle). +/// +/// # Type Parameters +/// +/// * `G` - Graph type (e.g., SimpleGraph) +/// +/// # Example +/// +/// ``` +/// use problemreductions::models::graph::PartitionIntoForests; +/// use problemreductions::topology::SimpleGraph; +/// use problemreductions::{Problem, Solver, BruteForce}; +/// +/// // Graph containing two triangles; K=2 forests suffice +/// let graph = SimpleGraph::new(6, vec![(0,1),(1,2),(2,0),(2,3),(3,4),(4,5),(5,3)]); +/// let problem = PartitionIntoForests::new(graph, 2); +/// +/// let solver = BruteForce::new(); +/// let solution = solver.find_witness(&problem); +/// assert!(solution.is_some()); +/// ``` +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(bound(deserialize = "G: serde::Deserialize<'de>"))] +pub struct PartitionIntoForests { + /// The underlying graph. + graph: G, + /// Number of forest classes. + num_forests: usize, +} + +impl PartitionIntoForests { + /// Create a new Partition Into Forests instance. + /// + /// # Panics + /// Panics if `num_forests` is zero. + pub fn new(graph: G, num_forests: usize) -> Self { + assert!(num_forests >= 1, "num_forests must be at least 1"); + Self { graph, num_forests } + } + + /// Get a reference to the underlying graph. + pub fn graph(&self) -> &G { + &self.graph + } + + /// Get the number of forest classes. + pub fn num_forests(&self) -> usize { + self.num_forests + } + + /// Get the number of vertices in the underlying graph. + pub fn num_vertices(&self) -> usize { + self.graph.num_vertices() + } + + /// Get the number of edges in the underlying graph. + pub fn num_edges(&self) -> usize { + self.graph.num_edges() + } +} + +impl Problem for PartitionIntoForests +where + G: Graph + VariantParam, +{ + const NAME: &'static str = "PartitionIntoForests"; + type Value = crate::types::Or; + + fn variant() -> Vec<(&'static str, &'static str)> { + crate::variant_params![G] + } + + fn dims(&self) -> Vec { + vec![self.num_forests; self.graph.num_vertices()] + } + + fn evaluate(&self, config: &[usize]) -> crate::types::Or { + crate::types::Or(is_valid_forest_partition( + &self.graph, + self.num_forests, + config, + )) + } +} + +/// Check whether `config` is a valid K-forest partition of `graph`. +fn is_valid_forest_partition(graph: &G, num_forests: usize, config: &[usize]) -> bool { + let n = graph.num_vertices(); + + // Basic validity checks + if config.len() != n { + return false; + } + if config.iter().any(|&c| c >= num_forests) { + return false; + } + + // For each forest class, verify the induced subgraph is acyclic using union-find. + // An undirected graph is acyclic iff union-find never sees an edge (u, v) where + // u and v already share a component. + let mut parent: Vec = (0..n).collect(); + + fn find(parent: &mut Vec, x: usize) -> usize { + if parent[x] != x { + parent[x] = find(parent, parent[x]); + } + parent[x] + } + + for (u, v) in graph.edges() { + if config[u] != config[v] { + // Edge crosses classes — not in any induced subgraph + continue; + } + // Both u and v are in the same class; check for cycle + let ru = find(&mut parent, u); + let rv = find(&mut parent, v); + if ru == rv { + return false; // Cycle detected + } + parent[ru] = rv; // Union + } + + true +} + +crate::declare_variants! { + default PartitionIntoForests => "num_forests^num_vertices", +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_model_example_specs() -> Vec { + vec![crate::example_db::specs::ModelExampleSpec { + id: "partition_into_forests_simplegraph", + instance: Box::new(PartitionIntoForests::new( + SimpleGraph::new( + 6, + vec![(0, 1), (1, 2), (2, 0), (2, 3), (3, 4), (4, 5), (5, 3)], + ), + 2, + )), + // V0={0,3}: edges from graph in class 0: none among {0,3} → forest + // V1={1,2,4,5}: edges (1,2),(3,4) but 3∉V1; edges among V1: (1,2),(4,5) → path forest + optimal_config: vec![0, 1, 1, 0, 1, 1], + optimal_value: serde_json::json!(true), + }] +} + +#[cfg(test)] +#[path = "../../unit_tests/models/graph/partition_into_forests.rs"] +mod tests; diff --git a/src/models/mod.rs b/src/models/mod.rs index 194bc5f4..7e049f8d 100644 --- a/src/models/mod.rs +++ b/src/models/mod.rs @@ -31,10 +31,11 @@ pub use graph::{ MinimumFeedbackArcSet, MinimumFeedbackVertexSet, MinimumMaximalMatching, MinimumMultiwayCut, MinimumSumMulticenter, MinimumVertexCover, MixedChinesePostman, MultipleChoiceBranching, MultipleCopyFileAllocation, OptimalLinearArrangement, PartialFeedbackEdgeSet, - PartitionIntoPathsOfLength2, PartitionIntoTriangles, PathConstrainedNetworkFlow, - RootedTreeArrangement, RuralPostman, ShortestWeightConstrainedPath, SpinGlass, SteinerTree, - SteinerTreeInGraphs, StrongConnectivityAugmentation, SubgraphIsomorphism, TravelingSalesman, - UndirectedFlowLowerBounds, UndirectedTwoCommodityIntegralFlow, + PartitionIntoForests, PartitionIntoPathsOfLength2, PartitionIntoTriangles, + PathConstrainedNetworkFlow, RootedTreeArrangement, RuralPostman, ShortestWeightConstrainedPath, + SpinGlass, SteinerTree, SteinerTreeInGraphs, StrongConnectivityAugmentation, + SubgraphIsomorphism, TravelingSalesman, UndirectedFlowLowerBounds, + UndirectedTwoCommodityIntegralFlow, }; pub use misc::PartiallyOrderedKnapsack; pub use misc::{ diff --git a/src/unit_tests/models/graph/partition_into_forests.rs b/src/unit_tests/models/graph/partition_into_forests.rs new file mode 100644 index 00000000..bd849d81 --- /dev/null +++ b/src/unit_tests/models/graph/partition_into_forests.rs @@ -0,0 +1,117 @@ +use super::*; +use crate::solvers::BruteForce; +use crate::topology::SimpleGraph; +use crate::traits::Problem; + +fn two_triangle_instance() -> PartitionIntoForests { + // Two disjoint triangles + bridge edge (2,3) + // Triangle A: 0-1-2-0, Triangle B: 3-4-5-3 + PartitionIntoForests::new( + SimpleGraph::new( + 6, + vec![(0, 1), (1, 2), (2, 0), (2, 3), (3, 4), (4, 5), (5, 3)], + ), + 2, + ) +} + +#[test] +fn test_partition_into_forests_creation() { + let problem = two_triangle_instance(); + assert_eq!(problem.num_vertices(), 6); + assert_eq!(problem.num_edges(), 7); + assert_eq!(problem.num_forests(), 2); + assert_eq!(problem.dims(), vec![2; 6]); + assert_eq!(problem.graph().num_vertices(), 6); +} + +#[test] +fn test_partition_into_forests_evaluate_positive() { + let problem = two_triangle_instance(); + + // config [0,1,1,0,1,1]: class0={0,3} (no intra-class edges), class1={1,2,4,5} (edges 1-2, 4-5, both trees) + assert!(problem.evaluate(&[0, 1, 1, 0, 1, 1])); + + // config [0,0,1,1,0,1]: class0={0,1,4} (edge 0-1 → path), class1={2,3,5} (no triangle edges remain) + assert!(problem.evaluate(&[0, 0, 1, 1, 0, 1])); +} + +#[test] +fn test_partition_into_forests_evaluate_negative_k1() { + // K=1: must put all vertices in one class; two triangles create cycles + let problem = PartitionIntoForests::new( + SimpleGraph::new(6, vec![(0, 1), (1, 2), (2, 0), (3, 4), (4, 5), (5, 3)]), + 1, + ); + + // Any single-class assignment must include a triangle → cycle + assert!(!problem.evaluate(&[0, 0, 0, 0, 0, 0])); +} + +#[test] +fn test_partition_into_forests_evaluate_cycle_in_class() { + let problem = two_triangle_instance(); + + // config [0,0,0,1,1,1]: class0={0,1,2} contains triangle 0-1-2-0 → cycle + assert!(!problem.evaluate(&[0, 0, 0, 1, 1, 1])); +} + +#[test] +fn test_partition_into_forests_evaluate_wrong_config_length() { + let problem = two_triangle_instance(); + assert!(!problem.evaluate(&[0, 1, 0])); + assert!(!problem.evaluate(&[0, 1, 0, 0, 1, 1, 0])); +} + +#[test] +fn test_partition_into_forests_evaluate_out_of_range_class() { + let problem = two_triangle_instance(); + // Class 2 doesn't exist (num_forests=2) + assert!(!problem.evaluate(&[0, 1, 2, 0, 1, 1])); +} + +#[test] +fn test_partition_into_forests_brute_force_finds_solution() { + // Small instance: 4-cycle (no triangle), K=2 should work easily + let problem = + PartitionIntoForests::new(SimpleGraph::new(4, vec![(0, 1), (1, 2), (2, 3), (3, 0)]), 2); + let solver = BruteForce::new(); + let solution = solver.find_witness(&problem); + assert!(solution.is_some()); + assert!(problem.evaluate(&solution.unwrap())); +} + +#[test] +fn test_partition_into_forests_brute_force_no_solution() { + // Single triangle, K=1: impossible + let problem = PartitionIntoForests::new(SimpleGraph::new(3, vec![(0, 1), (1, 2), (2, 0)]), 1); + let solver = BruteForce::new(); + assert!(solver.find_witness(&problem).is_none()); +} + +#[test] +fn test_partition_into_forests_brute_force_all_valid() { + // Small acyclic graph (path 0-1-2), K=1: every assignment is valid + let problem = PartitionIntoForests::new(SimpleGraph::new(3, vec![(0, 1), (1, 2)]), 1); + let solutions = BruteForce::new().find_all_witnesses(&problem); + assert!(!solutions.is_empty()); + for sol in &solutions { + assert!(problem.evaluate(sol)); + } +} + +#[test] +fn test_partition_into_forests_serialization() { + let problem = two_triangle_instance(); + let json = serde_json::to_string(&problem).unwrap(); + let deserialized: PartitionIntoForests = serde_json::from_str(&json).unwrap(); + assert_eq!(deserialized.num_vertices(), 6); + assert_eq!(deserialized.num_edges(), 7); + assert_eq!(deserialized.num_forests(), 2); +} + +#[test] +#[should_panic(expected = "num_forests must be at least 1")] +fn test_partition_into_forests_rejects_zero_forests() { + let _ = PartitionIntoForests::new(SimpleGraph::new(2, vec![(0, 1)]), 0); +} From a9670e688d3e88fe77dc594101815ed2c4b3b7f1 Mon Sep 17 00:00:00 2001 From: Xiwei Pan Date: Mon, 30 Mar 2026 11:21:23 +0800 Subject: [PATCH 15/21] fix: resolve clippy needless_range_loop in OpenShopScheduling ILP Co-Authored-By: Claude Opus 4.6 (1M context) --- src/rules/openshopscheduling_ilp.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/rules/openshopscheduling_ilp.rs b/src/rules/openshopscheduling_ilp.rs index e3495797..7487c1b9 100644 --- a/src/rules/openshopscheduling_ilp.rs +++ b/src/rules/openshopscheduling_ilp.rs @@ -184,12 +184,12 @@ impl ReduceTo> for OpenShopScheduling { // (b) s_{j,i} - s_{k,i} + M*x ≥ p_{k,i} for j in 0..n { for k in (j + 1)..n { - for i in 0..m { + for (i, (&pji_val, &pki_val)) in p[j].iter().zip(p[k].iter()).enumerate() { let x = result.x_var(j, k, i); let sj = result.s_var(j, i); let sk = result.s_var(k, i); - let pji = p[j][i] as f64; - let pki = p[k][i] as f64; + let pji = pji_val as f64; + let pki = pki_val as f64; // (a) s_{k,i} - s_{j,i} - M*x_{j,k,i} >= p_{j,i} - M constraints.push(LinearConstraint::ge( From bfe6f235529791bdb3335af90fc73b890833bf47 Mon Sep 17 00:00:00 2001 From: Xiwei Pan Date: Mon, 30 Mar 2026 17:28:53 +0800 Subject: [PATCH 16/21] refactor: use i32 instead of usize for MinimumTardinessSequencing weight parameter MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Replace standalone `usize` weight type with `i32`, which integrates into the existing One → i32 → f64 variant hierarchy. This enables natural edges between MinimumTardinessSequencing and variants in the reduction graph. Remove now-unnecessary WeightElement and VariantParam impls for usize from types.rs. Co-Authored-By: Claude Opus 4.6 (1M context) --- problemreductions-cli/src/commands/create.rs | 6 +++--- .../misc/minimum_tardiness_sequencing.rs | 20 +++++++++--------- src/rules/minimumtardinesssequencing_ilp.rs | 10 ++++----- src/types.rs | 9 -------- .../misc/minimum_tardiness_sequencing.rs | 21 ++++++++----------- .../rules/minimumtardinesssequencing_ilp.rs | 11 ++++------ 6 files changed, 31 insertions(+), 46 deletions(-) diff --git a/problemreductions-cli/src/commands/create.rs b/problemreductions-cli/src/commands/create.rs index e61ff304..4b49a489 100644 --- a/problemreductions-cli/src/commands/create.rs +++ b/problemreductions-cli/src/commands/create.rs @@ -3767,8 +3767,8 @@ pub fn create(args: &CreateArgs, out: &OutputConfig) -> Result<()> { let precedences = parse_precedence_pairs(args.precedence_pairs.as_deref())?; if let Some(sizes_str) = args.sizes.as_deref() { - // Arbitrary-length variant (W = usize) - let lengths: Vec = util::parse_comma_list(sizes_str)?; + // Arbitrary-length variant (W = i32) + let lengths: Vec = util::parse_comma_list(sizes_str)?; anyhow::ensure!( lengths.len() == deadlines.len(), "sizes length ({}) must equal deadlines length ({})", @@ -3777,7 +3777,7 @@ pub fn create(args: &CreateArgs, out: &OutputConfig) -> Result<()> { ); validate_precedence_pairs(&precedences, lengths.len())?; ( - ser(MinimumTardinessSequencing::::with_lengths( + ser(MinimumTardinessSequencing::::with_lengths( lengths, deadlines, precedences, diff --git a/src/models/misc/minimum_tardiness_sequencing.rs b/src/models/misc/minimum_tardiness_sequencing.rs index 6b656f45..f507094d 100644 --- a/src/models/misc/minimum_tardiness_sequencing.rs +++ b/src/models/misc/minimum_tardiness_sequencing.rs @@ -6,7 +6,7 @@ //! //! Variants: //! - `MinimumTardinessSequencing` — unit-length tasks (`1|prec, pj=1|∑Uj`) -//! - `MinimumTardinessSequencing` — arbitrary-length tasks (`1|prec|∑Uj`) +//! - `MinimumTardinessSequencing` — arbitrary-length tasks (`1|prec|∑Uj`) use crate::registry::{FieldInfo, ProblemSchemaEntry, VariantDimension}; use crate::traits::Problem; @@ -18,7 +18,7 @@ inventory::submit! { name: "MinimumTardinessSequencing", display_name: "Minimum Tardiness Sequencing", aliases: &[], - dimensions: &[VariantDimension::new("weight", "One", &["One", "usize"])], + dimensions: &[VariantDimension::new("weight", "One", &["One", "i32"])], module_path: module_path!(), description: "Schedule tasks with precedence constraints and deadlines to minimize the number of tardy tasks", fields: &[ @@ -38,7 +38,7 @@ inventory::submit! { /// /// # Type Parameters /// -/// * `W` - The weight/length type. `One` for unit-length tasks, `usize` for arbitrary. +/// * `W` - The weight/length type. `One` for unit-length tasks, `i32` for arbitrary. /// /// # Example /// @@ -86,7 +86,7 @@ impl MinimumTardinessSequencing { } } -impl MinimumTardinessSequencing { +impl MinimumTardinessSequencing { /// Create a new arbitrary-length MinimumTardinessSequencing instance. /// /// # Panics @@ -94,7 +94,7 @@ impl MinimumTardinessSequencing { /// Panics if `lengths.len() != deadlines.len()`, if any length is 0, /// or if any task index in `precedences` is out of range. pub fn with_lengths( - lengths: Vec, + lengths: Vec, deadlines: Vec, precedences: Vec<(usize, usize)>, ) -> Self { @@ -206,12 +206,12 @@ impl Problem for MinimumTardinessSequencing { } } -impl Problem for MinimumTardinessSequencing { +impl Problem for MinimumTardinessSequencing { const NAME: &'static str = "MinimumTardinessSequencing"; type Value = Min; fn variant() -> Vec<(&'static str, &'static str)> { - crate::variant_params![usize] + crate::variant_params![i32] } fn dims(&self) -> Vec { @@ -234,7 +234,7 @@ impl Problem for MinimumTardinessSequencing { let mut completion = vec![0usize; n]; let mut cumulative = 0usize; for &task in &schedule { - cumulative += self.lengths[task]; + cumulative += self.lengths[task] as usize; completion[task] = cumulative; } @@ -248,7 +248,7 @@ impl Problem for MinimumTardinessSequencing { crate::declare_variants! { default MinimumTardinessSequencing => "2^num_tasks", - MinimumTardinessSequencing => "2^num_tasks", + MinimumTardinessSequencing => "2^num_tasks", } #[cfg(feature = "example-db")] @@ -272,7 +272,7 @@ pub(crate) fn canonical_model_example_specs() -> Vec::with_lengths( + instance: Box::new(MinimumTardinessSequencing::::with_lengths( vec![3, 2, 2, 1, 2], vec![4, 3, 8, 3, 6], vec![(0, 2), (1, 3)], diff --git a/src/rules/minimumtardinesssequencing_ilp.rs b/src/rules/minimumtardinesssequencing_ilp.rs index 05e790e7..f09bdc7f 100644 --- a/src/rules/minimumtardinesssequencing_ilp.rs +++ b/src/rules/minimumtardinesssequencing_ilp.rs @@ -33,7 +33,7 @@ impl ReductionResult for ReductionMTSToILP { } } -/// Result of reducing MinimumTardinessSequencing to ILP. +/// Result of reducing MinimumTardinessSequencing to ILP. #[derive(Debug, Clone)] pub struct ReductionMTSWeightedToILP { target: ILP, @@ -41,7 +41,7 @@ pub struct ReductionMTSWeightedToILP { } impl ReductionResult for ReductionMTSWeightedToILP { - type Source = MinimumTardinessSequencing; + type Source = MinimumTardinessSequencing; type Target = ILP; fn target_problem(&self) -> &ILP { @@ -129,14 +129,14 @@ impl ReduceTo> for MinimumTardinessSequencing { num_vars = "num_tasks * num_tasks + num_tasks", num_constraints = "2 * num_tasks + num_precedences + num_tasks * num_tasks", })] -impl ReduceTo> for MinimumTardinessSequencing { +impl ReduceTo> for MinimumTardinessSequencing { type Result = ReductionMTSWeightedToILP; fn reduce_to(&self) -> Self::Result { let n = self.num_tasks(); let num_x_vars = n * n; let num_vars = num_x_vars + n; - let total_length: usize = self.lengths().iter().copied().sum(); + let total_length: i32 = self.lengths().iter().copied().sum(); let big_m = total_length as f64; let x_var = |j: usize, p: usize| -> usize { j * n + p }; @@ -183,7 +183,7 @@ pub(crate) fn canonical_rule_example_specs() -> Vec::with_lengths( + let source = MinimumTardinessSequencing::::with_lengths( vec![2, 1, 3], vec![3, 4, 5], vec![(0, 2)], diff --git a/src/types.rs b/src/types.rs index 4b698f30..4d14f6ca 100644 --- a/src/types.rs +++ b/src/types.rs @@ -59,14 +59,6 @@ impl WeightElement for f64 { } } -impl WeightElement for usize { - type Sum = usize; - const IS_UNIT: bool = false; - fn to_sum(&self) -> usize { - *self - } -} - /// The constant 1. Unit weight for unweighted problems. /// /// When used as the weight type parameter `W`, indicates that all weights @@ -561,7 +553,6 @@ use crate::impl_variant_param; impl_variant_param!(f64, "weight"); impl_variant_param!(i32, "weight", parent: f64, cast: |w| *w as f64); impl_variant_param!(One, "weight", parent: i32, cast: |_| 1i32); -impl_variant_param!(usize, "weight"); #[cfg(test)] #[path = "unit_tests/types.rs"] diff --git a/src/unit_tests/models/misc/minimum_tardiness_sequencing.rs b/src/unit_tests/models/misc/minimum_tardiness_sequencing.rs index 4485cb34..704471b8 100644 --- a/src/unit_tests/models/misc/minimum_tardiness_sequencing.rs +++ b/src/unit_tests/models/misc/minimum_tardiness_sequencing.rs @@ -148,11 +148,11 @@ fn test_minimum_tardiness_sequencing_cyclic_precedences() { assert!(solver.find_witness(&problem).is_none()); } -// ===== Arbitrary-length variant (W = usize) ===== +// ===== Arbitrary-length variant (W = i32) ===== #[test] fn test_minimum_tardiness_sequencing_weighted_basic() { - let problem = MinimumTardinessSequencing::::with_lengths( + let problem = MinimumTardinessSequencing::::with_lengths( vec![3, 2, 2, 1, 2], vec![4, 3, 8, 3, 6], vec![(0, 2), (1, 3)], @@ -168,7 +168,7 @@ fn test_minimum_tardiness_sequencing_weighted_evaluate() { // Issue example: 5 tasks, lengths [3,2,2,1,2], deadlines [4,3,8,3,6], prec (0→2, 1→3) // Schedule: t0,t4,t2,t1,t3 // Lehmer [0,3,1,0,0] -> schedule [0,4,2,1,3] - let problem = MinimumTardinessSequencing::::with_lengths( + let problem = MinimumTardinessSequencing::::with_lengths( vec![3, 2, 2, 1, 2], vec![4, 3, 8, 3, 6], vec![(0, 2), (1, 3)], @@ -183,7 +183,7 @@ fn test_minimum_tardiness_sequencing_weighted_evaluate() { #[test] fn test_minimum_tardiness_sequencing_weighted_brute_force() { - let problem = MinimumTardinessSequencing::::with_lengths( + let problem = MinimumTardinessSequencing::::with_lengths( vec![3, 2, 2, 1, 2], vec![4, 3, 8, 3, 6], vec![(0, 2), (1, 3)], @@ -198,13 +198,10 @@ fn test_minimum_tardiness_sequencing_weighted_brute_force() { #[test] fn test_minimum_tardiness_sequencing_weighted_serialization() { - let problem = MinimumTardinessSequencing::::with_lengths( - vec![3, 2, 2], - vec![4, 3, 8], - vec![(0, 1)], - ); + let problem = + MinimumTardinessSequencing::::with_lengths(vec![3, 2, 2], vec![4, 3, 8], vec![(0, 1)]); let json = serde_json::to_value(&problem).unwrap(); - let restored: MinimumTardinessSequencing = serde_json::from_value(json).unwrap(); + let restored: MinimumTardinessSequencing = serde_json::from_value(json).unwrap(); assert_eq!(restored.num_tasks(), problem.num_tasks()); assert_eq!(restored.lengths(), problem.lengths()); assert_eq!(restored.deadlines(), problem.deadlines()); @@ -217,7 +214,7 @@ fn test_minimum_tardiness_sequencing_weighted_different_lengths() { // Schedule [0,1,2]: t0(l=1,fin=1≤2✓), t1(l=5,fin=6≤6✓), t2(l=1,fin=7>3✗) → 1 tardy // Schedule [1,0,2]: t1(l=5,fin=5≤6✓), t0(l=1,fin=6>2✗), t2(l=1,fin=7>3✗) → 2 tardy let problem = - MinimumTardinessSequencing::::with_lengths(vec![1, 5, 1], vec![2, 6, 3], vec![]); + MinimumTardinessSequencing::::with_lengths(vec![1, 5, 1], vec![2, 6, 3], vec![]); let solver = BruteForce::new(); let solution = solver .find_witness(&problem) @@ -228,7 +225,7 @@ fn test_minimum_tardiness_sequencing_weighted_different_lengths() { #[test] #[should_panic(expected = "all task lengths must be positive")] fn test_minimum_tardiness_sequencing_weighted_zero_length() { - MinimumTardinessSequencing::::with_lengths(vec![1, 0, 2], vec![3, 3, 3], vec![]); + MinimumTardinessSequencing::::with_lengths(vec![1, 0, 2], vec![3, 3, 3], vec![]); } #[test] diff --git a/src/unit_tests/rules/minimumtardinesssequencing_ilp.rs b/src/unit_tests/rules/minimumtardinesssequencing_ilp.rs index 21a0deae..ba211afe 100644 --- a/src/unit_tests/rules/minimumtardinesssequencing_ilp.rs +++ b/src/unit_tests/rules/minimumtardinesssequencing_ilp.rs @@ -68,23 +68,20 @@ fn test_minimumtardinesssequencing_to_ilp_all_tight() { #[test] fn test_minimumtardinesssequencing_weighted_to_ilp_closed_loop() { - let problem = MinimumTardinessSequencing::::with_lengths( - vec![2, 1, 3], - vec![3, 4, 5], - vec![(0, 2)], - ); + let problem = + MinimumTardinessSequencing::::with_lengths(vec![2, 1, 3], vec![3, 4, 5], vec![(0, 2)]); let reduction = ReduceTo::>::reduce_to(&problem); assert_optimization_round_trip_from_optimization_target( &problem, &reduction, - "MinimumTardinessSequencing->ILP closed loop", + "MinimumTardinessSequencing->ILP closed loop", ); } #[test] fn test_minimumtardinesssequencing_weighted_to_ilp_vs_brute_force() { - let problem = MinimumTardinessSequencing::::with_lengths( + let problem = MinimumTardinessSequencing::::with_lengths( vec![3, 2, 2, 1, 2], vec![4, 3, 8, 3, 6], vec![(0, 2), (1, 3)], From bfb80423f56477b1da8d8bb554720be433c4d32d Mon Sep 17 00:00:00 2001 From: Xiwei Pan Date: Tue, 31 Mar 2026 02:36:48 +0800 Subject: [PATCH 17/21] feat: add 24 new problem models with tests, paper entries, and CLI support MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Models span all categories: graph (10), formula (3), algebraic (3), set (1), and misc (7). Each model includes unit tests (7-19 per model), canonical examples, paper problem-def entries, and CLI create handlers. Also fixes: Typst compile error in MinimumMatrixDomination example, missing bouchez2006 bib entry, sect→inter deprecation warning. Co-Authored-By: Claude Opus 4.6 (1M context) --- docs/paper/reductions.typ | 627 ++++++++++++++ docs/paper/references.bib | 23 + problemreductions-cli/src/cli.rs | 35 +- problemreductions-cli/src/commands/create.rs | 810 +++++++++++++++++- src/lib.rs | 7 +- .../algebraic/algebraic_equations_over_gf2.rs | 225 +++++ .../algebraic/minimum_matrix_domination.rs | 192 +++++ ...mum_weight_solution_to_linear_equations.rs | 227 +++++ src/models/algebraic/mod.rs | 12 + src/models/formula/mod.rs | 12 + src/models/formula/non_tautology.rs | 169 ++++ .../formula/one_in_three_satisfiability.rs | 183 ++++ src/models/formula/planar_3_satisfiability.rs | 180 ++++ .../graph/bounded_diameter_spanning_tree.rs | 310 +++++++ .../graph/degree_constrained_spanning_tree.rs | 209 +++++ src/models/graph/kernel.rs | 153 ++++ src/models/graph/maximum_achromatic_number.rs | 179 ++++ .../graph/minimum_covering_by_cliques.rs | 195 +++++ ...imum_geometric_connected_dominating_set.rs | 207 +++++ .../graph/minimum_intersection_graph_basis.rs | 184 ++++ src/models/graph/mod.rs | 40 + src/models/graph/monochromatic_triangle.rs | 197 +++++ src/models/graph/partition_into_cliques.rs | 182 ++++ .../graph/partition_into_perfect_matchings.rs | 190 ++++ src/models/misc/betweenness.rs | 186 ++++ src/models/misc/cyclic_ordering.rs | 191 +++++ src/models/misc/dynamic_storage_allocation.rs | 189 ++++ .../misc/feasible_register_assignment.rs | 256 ++++++ src/models/misc/mod.rs | 28 + .../misc/non_liveness_free_petri_net.rs | 440 ++++++++++ .../misc/numerical_3_dimensional_matching.rs | 235 +++++ src/models/misc/subset_product.rs | 221 +++++ src/models/mod.rs | 70 +- src/models/set/mod.rs | 7 + src/models/set/three_dimensional_matching.rs | 199 +++++ .../algebraic/algebraic_equations_over_gf2.rs | 191 +++++ .../algebraic/minimum_matrix_domination.rs | 180 ++++ ...mum_weight_solution_to_linear_equations.rs | 139 +++ .../models/formula/non_tautology.rs | 90 ++ .../formula/one_in_three_satisfiability.rs | 124 +++ .../models/formula/planar_3_satisfiability.rs | 141 +++ .../graph/bounded_diameter_spanning_tree.rs | 134 +++ .../graph/degree_constrained_spanning_tree.rs | 128 +++ src/unit_tests/models/graph/kernel.rs | 106 +++ .../models/graph/maximum_achromatic_number.rs | 96 +++ .../graph/minimum_covering_by_cliques.rs | 132 +++ ...imum_geometric_connected_dominating_set.rs | 128 +++ .../graph/minimum_intersection_graph_basis.rs | 110 +++ .../models/graph/monochromatic_triangle.rs | 112 +++ .../models/graph/partition_into_cliques.rs | 122 +++ .../graph/partition_into_perfect_matchings.rs | 115 +++ src/unit_tests/models/misc/betweenness.rs | 134 +++ src/unit_tests/models/misc/cyclic_ordering.rs | 131 +++ .../models/misc/dynamic_storage_allocation.rs | 152 ++++ .../misc/feasible_register_assignment.rs | 154 ++++ .../misc/non_liveness_free_petri_net.rs | 184 ++++ .../misc/numerical_3_dimensional_matching.rs | 186 ++++ src/unit_tests/models/misc/subset_product.rs | 176 ++++ .../models/set/three_dimensional_matching.rs | 136 +++ 59 files changed, 10018 insertions(+), 53 deletions(-) create mode 100644 src/models/algebraic/algebraic_equations_over_gf2.rs create mode 100644 src/models/algebraic/minimum_matrix_domination.rs create mode 100644 src/models/algebraic/minimum_weight_solution_to_linear_equations.rs create mode 100644 src/models/formula/non_tautology.rs create mode 100644 src/models/formula/one_in_three_satisfiability.rs create mode 100644 src/models/formula/planar_3_satisfiability.rs create mode 100644 src/models/graph/bounded_diameter_spanning_tree.rs create mode 100644 src/models/graph/degree_constrained_spanning_tree.rs create mode 100644 src/models/graph/kernel.rs create mode 100644 src/models/graph/maximum_achromatic_number.rs create mode 100644 src/models/graph/minimum_covering_by_cliques.rs create mode 100644 src/models/graph/minimum_geometric_connected_dominating_set.rs create mode 100644 src/models/graph/minimum_intersection_graph_basis.rs create mode 100644 src/models/graph/monochromatic_triangle.rs create mode 100644 src/models/graph/partition_into_cliques.rs create mode 100644 src/models/graph/partition_into_perfect_matchings.rs create mode 100644 src/models/misc/betweenness.rs create mode 100644 src/models/misc/cyclic_ordering.rs create mode 100644 src/models/misc/dynamic_storage_allocation.rs create mode 100644 src/models/misc/feasible_register_assignment.rs create mode 100644 src/models/misc/non_liveness_free_petri_net.rs create mode 100644 src/models/misc/numerical_3_dimensional_matching.rs create mode 100644 src/models/misc/subset_product.rs create mode 100644 src/models/set/three_dimensional_matching.rs create mode 100644 src/unit_tests/models/algebraic/algebraic_equations_over_gf2.rs create mode 100644 src/unit_tests/models/algebraic/minimum_matrix_domination.rs create mode 100644 src/unit_tests/models/algebraic/minimum_weight_solution_to_linear_equations.rs create mode 100644 src/unit_tests/models/formula/non_tautology.rs create mode 100644 src/unit_tests/models/formula/one_in_three_satisfiability.rs create mode 100644 src/unit_tests/models/formula/planar_3_satisfiability.rs create mode 100644 src/unit_tests/models/graph/bounded_diameter_spanning_tree.rs create mode 100644 src/unit_tests/models/graph/degree_constrained_spanning_tree.rs create mode 100644 src/unit_tests/models/graph/kernel.rs create mode 100644 src/unit_tests/models/graph/maximum_achromatic_number.rs create mode 100644 src/unit_tests/models/graph/minimum_covering_by_cliques.rs create mode 100644 src/unit_tests/models/graph/minimum_geometric_connected_dominating_set.rs create mode 100644 src/unit_tests/models/graph/minimum_intersection_graph_basis.rs create mode 100644 src/unit_tests/models/graph/monochromatic_triangle.rs create mode 100644 src/unit_tests/models/graph/partition_into_cliques.rs create mode 100644 src/unit_tests/models/graph/partition_into_perfect_matchings.rs create mode 100644 src/unit_tests/models/misc/betweenness.rs create mode 100644 src/unit_tests/models/misc/cyclic_ordering.rs create mode 100644 src/unit_tests/models/misc/dynamic_storage_allocation.rs create mode 100644 src/unit_tests/models/misc/feasible_register_assignment.rs create mode 100644 src/unit_tests/models/misc/non_liveness_free_petri_net.rs create mode 100644 src/unit_tests/models/misc/numerical_3_dimensional_matching.rs create mode 100644 src/unit_tests/models/misc/subset_product.rs create mode 100644 src/unit_tests/models/set/three_dimensional_matching.rs diff --git a/docs/paper/reductions.typ b/docs/paper/reductions.typ index 5ed406bf..c60593ad 100644 --- a/docs/paper/reductions.typ +++ b/docs/paper/reductions.typ @@ -77,6 +77,9 @@ // Problem display names for theorem headers #let display-name = ( "AdditionalKey": [Additional Key], + "AlgebraicEquationsOverGF2": [Algebraic Equations over GF(2)], + "Betweenness": [Betweenness], + "CyclicOrdering": [Cyclic Ordering], "AcyclicPartition": [Acyclic Partition], "MaximumIndependentSet": [Maximum Independent Set], "MinimumVertexCover": [Minimum Vertex Cover], @@ -86,6 +89,8 @@ "BiconnectivityAugmentation": [Biconnectivity Augmentation], "HamiltonianPath": [Hamiltonian Path], "HamiltonianPathBetweenTwoVertices": [Hamiltonian Path Between Two Vertices], + "BoundedDiameterSpanningTree": [Bounded Diameter Spanning Tree], + "DegreeConstrainedSpanningTree": [Degree-Constrained Spanning Tree], "DirectedHamiltonianPath": [Directed Hamiltonian Path], "IntegralFlowBundles": [Integral Flow with Bundles], "LongestCircuit": [Longest Circuit], @@ -96,10 +101,15 @@ "PathConstrainedNetworkFlow": [Path-Constrained Network Flow], "LengthBoundedDisjointPaths": [Length-Bounded Disjoint Paths], "IsomorphicSpanningTree": [Isomorphic Spanning Tree], + "Kernel": [Kernel], "KthBestSpanningTree": [Kth Best Spanning Tree], + "MaximumAchromaticNumber": [Maximum Achromatic Number], "KColoring": [$k$-Coloring], "KClique": [$k$-Clique], + "MinimumCoveringByCliques": [Minimum Covering by Cliques], + "MinimumIntersectionGraphBasis": [Minimum Intersection Graph Basis], "MinimumDominatingSet": [Minimum Dominating Set], + "MinimumGeometricConnectedDominatingSet": [Minimum Geometric Connected Dominating Set], "MaximumMatching": [Maximum Matching], "MinimumMaximalMatching": [Minimum Maximal Matching], "BottleneckTravelingSalesman": [Bottleneck Traveling Salesman], @@ -121,6 +131,9 @@ "Satisfiability": [SAT], "NAESatisfiability": [NAE-SAT], "KSatisfiability": [$k$-SAT], + "NonTautology": [Non-Tautology], + "OneInThreeSatisfiability": [1-in-3 SAT], + "Planar3Satisfiability": [Planar 3-SAT], "CircuitSAT": [CircuitSAT], "ConjunctiveQueryFoldability": [Conjunctive Query Foldability], "EnsembleComputation": [Ensemble Computation], @@ -148,10 +161,14 @@ "StackerCrane": [Stacker Crane], "LongestCommonSubsequence": [Longest Common Subsequence], "ExactCoverBy3Sets": [Exact Cover by 3-Sets], + "ThreeDimensionalMatching": [Three-Dimensional Matching], + "SubsetProduct": [Subset Product], "SubsetSum": [Subset Sum], "CosineProductIntegration": [Cosine Product Integration], "Partition": [Partition], "ThreePartition": [3-Partition], + "DynamicStorageAllocation": [Dynamic Storage Allocation], + "Numerical3DimensionalMatching": [Numerical 3-Dimensional Matching], "PartialFeedbackEdgeSet": [Partial Feedback Edge Set], "MinimumFeedbackArcSet": [Minimum Feedback Arc Set], "MinimumFeedbackVertexSet": [Minimum Feedback Vertex Set], @@ -161,6 +178,8 @@ "ConsecutiveOnesSubmatrix": [Consecutive Ones Submatrix], "FeasibleBasisExtension": [Feasible Basis Extension], "SparseMatrixCompression": [Sparse Matrix Compression], + "MinimumMatrixDomination": [Minimum Matrix Domination], + "MinimumWeightSolutionToLinearEquations": [Minimum Weight Solution to Linear Equations], "DirectedTwoCommodityIntegralFlow": [Directed Two-Commodity Integral Flow], "IntegralFlowHomologousArcs": [Integral Flow with Homologous Arcs], "IntegralFlowWithMultipliers": [Integral Flow With Multipliers], @@ -174,12 +193,16 @@ "MinimumDummyActivitiesPert": [Minimum Dummy Activities in PERT Networks], "MinimumSumMulticenter": [Minimum Sum Multicenter], "MinimumTardinessSequencing": [Minimum Tardiness Sequencing], + "MonochromaticTriangle": [Monochromatic Triangle], "MultipleChoiceBranching": [Multiple Choice Branching], "MultipleCopyFileAllocation": [Multiple Copy File Allocation], "ExpectedRetrievalCost": [Expected Retrieval Cost], "MultiprocessorScheduling": [Multiprocessor Scheduling], + "NonLivenessFreePetriNet": [Non-Liveness Free Petri Net], "ProductionPlanning": [Production Planning], + "PartitionIntoCliques": [Partition into Cliques], "PartitionIntoForests": [Partition into Forests], + "PartitionIntoPerfectMatchings": [Partition into Perfect Matchings], "PartitionIntoPathsOfLength2": [Partition into Paths of Length 2], "PartitionIntoTriangles": [Partition Into Triangles], "PrecedenceConstrainedScheduling": [Precedence Constrained Scheduling], @@ -192,6 +215,7 @@ "SimultaneousIncongruences": [Simultaneous Incongruences], "QuantifiedBooleanFormulas": [Quantified Boolean Formulas (QBF)], "RectilinearPictureCompression": [Rectilinear Picture Compression], + "FeasibleRegisterAssignment": [Feasible Register Assignment], "RegisterSufficiency": [Register Sufficiency], "ResourceConstrainedScheduling": [Resource Constrained Scheduling], "RootedTreeStorageAssignment": [Rooted Tree Storage Assignment], @@ -1191,6 +1215,27 @@ is feasible: each set induces a connected subgraph, the component weights are $2 ] ] } +#{ + let x = load-model-example("Kernel") + let nv = x.instance.graph.num_vertices + let arcs = x.instance.graph.arcs + [ + #problem-def("Kernel")[ + Given a directed graph $G = (V, A)$, find a _kernel_ $V' subset.eq V$ such that (1) $V'$ is _independent_ — no arc joins any two vertices in $V'$ — and (2) $V'$ is _absorbing_ — every vertex $u in.not V'$ has an arc $(u, v) in A$ for some $v in V'$. + ][ + A classical graph-theoretic concept introduced by von Neumann and Morgenstern (1944) in the context of game theory. Deciding whether a directed graph has a kernel is NP-complete in general, though every DAG has a unique kernel. Kernels appear in combinatorial game theory, graph coloring (Galvin's theorem), and stable set problems on digraphs. + + Variables: A binary vector of length $|V|$, where $x_v = 1$ iff vertex $v$ is in the kernel. + + *Example.* Consider the directed graph $G$ on #nv vertices with arcs ${#arcs.map(((u, v)) => $(#u arrow.r #v)$).join(", ")}$. The kernel $V' = {0, 3}$ is independent (no arc between 0 and 3) and absorbing (vertex 1 has arc to 3, vertex 2 has arc to 3, vertex 4 has arc to 0). + + #pred-commands( + "pred create --example Kernel -o kernel.json", + "pred solve kernel.json", + ) + ] + ] +} #{ let x = load-model-example("LongestPath") let nv = graph-num-vertices(x.instance) @@ -1616,6 +1661,31 @@ is feasible: each set induces a connected subgraph, the component weights are $2 ] ] } +#{ + let x = load-model-example("MaximumAchromaticNumber") + let nv = graph-num-vertices(x.instance) + let ne = graph-num-edges(x.instance) + let edges = x.instance.graph.edges + let sol = (config: x.optimal_config, metric: x.optimal_value) + let num-colors = metric-value(sol.metric) + let coloring = sol.config + let color-groups = range(num-colors).map(c => coloring.enumerate().filter(((i, v)) => v == c).map(((i, _)) => i)) + [ + #problem-def("MaximumAchromaticNumber")[ + Given an undirected graph $G = (V, E)$, find a proper vertex coloring $c: V -> {1, dots, k}$ that is _complete_ --- for every pair of distinct colors $i, j$ there exists an edge $(u, v) in E$ with $c(u) = i$ and $c(v) = j$ --- maximizing the number of colors $k$. + ][ + The achromatic number $psi(G)$ is the largest $k$ such that $G$ admits a complete proper $k$-coloring. It was introduced by Harary and Hedetniemi (1970) and shown NP-hard @garey1979[GT5]. Applications include network partition and information dissemination. Brute-force enumeration runs in $O^*(n^n)$ time. + + *Example.* Consider the 6-cycle $C_6$ with $n = #nv$ vertices and $|E| = #ne$ edges: #edges.map(((u, v)) => [${#u, #v}$]).join(", "). The coloring #range(nv).map(i => $c(v_#i) = #(coloring.at(i) + 1)$).join(", ") uses $#num-colors$ colors. It is proper (no adjacent pair shares a color) and complete: every pair of color classes is connected by at least one edge. Thus $psi(C_6) >= #num-colors$. + + #pred-commands( + "pred create --example MaximumAchromaticNumber -o achromatic.json", + "pred solve achromatic.json", + "pred evaluate achromatic.json --config " + x.optimal_config.map(str).join(","), + ) + ] + ] +} #{ let x = load-model-example("MinimumDominatingSet") let nv = graph-num-vertices(x.instance) @@ -1656,6 +1726,76 @@ is feasible: each set induces a connected subgraph, the component weights are $2 ] ] } +#{ + let x = load-model-example("MinimumGeometricConnectedDominatingSet") + let n = x.instance.points.len() + let B = x.instance.radius + let sol = (config: x.optimal_config, metric: x.optimal_value) + let S = sol.config.enumerate().filter(((i, v)) => v == 1).map(((i, _)) => i) + let wS = metric-value(sol.metric) + [ + #problem-def("MinimumGeometricConnectedDominatingSet")[ + Given points $P = {p_1, dots, p_n}$ in $RR^2$ and distance threshold $B > 0$, find $P' subset.eq P$ minimizing $|P'|$ s.t. (1) $forall p in P backslash P': exists q in P'$ with $d(p,q) <= B$ (domination), and (2) the unit-disk graph on $P'$ with radius $B$ is connected. + ][ + Geometric Connected Dominating Set arises in wireless ad-hoc networks: selected nodes form a connected backbone that covers all other nodes within communication range. The problem is NP-hard and generalizes both dominating set (dropping connectivity) and connected subgraph (dropping domination). + + *Example.* Consider $n = #n$ points arranged in a $4 times 2$ ladder with spacing $3$ and threshold $B = #B$. The bottom row $P' = {#S.map(i => $p_#i$).join(", ")}$ forms a minimum connected dominating set of size $#wS$: each bottom-row point dominates the top-row point directly above it (vertical distance $3 <= #B$), and consecutive bottom-row points are within distance $3 <= #B$ of each other, so $P'$ induces a connected path. + + #pred-commands( + "pred create --example MinimumGeometricConnectedDominatingSet -o mgcds.json", + "pred solve mgcds.json", + "pred evaluate mgcds.json --config " + x.optimal_config.map(str).join(","), + ) + ] + ] +} +#{ + let x = load-model-example("MinimumCoveringByCliques") + let nv = graph-num-vertices(x.instance) + let ne = graph-num-edges(x.instance) + let edges = x.instance.graph.edges + let sol = (config: x.optimal_config, metric: x.optimal_value) + let num-cliques = metric-value(sol.metric) + let groups = range(num-cliques).map(c => sol.config.enumerate().filter(((i, v)) => v == c).map(((i, _)) => i)) + [ + #problem-def("MinimumCoveringByCliques")[ + Given an undirected graph $G = (V, E)$, find a collection of cliques $C_1, dots, C_k$ in $G$ such that every edge $e in E$ is contained in at least one $C_i$, and the number of cliques $k$ is minimized. + ][ + Minimum Covering by Cliques (also called _edge clique cover_) is NP-hard @garey1979[GT59]. Applications include intersection graph recognition and computational biology. The minimum edge clique cover number equals the minimum dimension of a dot-product representation of the graph. Brute-force enumeration runs in $O^*(2^(|E|))$ time. + + *Example.* Consider $G$ with $n = #nv$ vertices and $|E| = #ne$ edges: #edges.map(((u, v)) => [${#u, #v}$]).join(", "). An optimal cover uses $#num-cliques$ cliques: #groups.enumerate().filter(((_, g)) => g.len() > 0).map(((i, g)) => [$C_#(i + 1)$: edges #g.map(j => str(j)).join(", ")]).join("; "). + + #pred-commands( + "pred create --example MinimumCoveringByCliques -o covering-by-cliques.json", + "pred solve covering-by-cliques.json", + "pred evaluate covering-by-cliques.json --config " + x.optimal_config.map(str).join(","), + ) + ] + ] +} +#{ + let x = load-model-example("MinimumIntersectionGraphBasis") + let nv = graph-num-vertices(x.instance) + let ne = graph-num-edges(x.instance) + let edges = x.instance.graph.edges + let sol = (config: x.optimal_config, metric: x.optimal_value) + let universe-size = metric-value(sol.metric) + [ + #problem-def("MinimumIntersectionGraphBasis")[ + Given an undirected graph $G = (V, E)$, find a universe $U$ of minimum cardinality and an assignment of subsets $S_v subset.eq U$ for each vertex $v in V$ such that two vertices $u, v$ are adjacent if and only if $S_u inter S_v eq.not emptyset$. The minimum $|U|$ is the _intersection number_ of $G$. + ][ + Minimum Intersection Graph Basis is NP-hard @garey1979[GT60]. Every graph is an intersection graph; the intersection number measures how compactly such a representation can be chosen. The intersection number is at most $|E|$. Brute-force enumeration runs in $O^*(|E|^(|E|))$ time. + + *Example.* Consider the path $P_3$ with $n = #nv$ vertices and $|E| = #ne$ edges: #edges.map(((u, v)) => [${#u, #v}$]).join(", "). An optimal representation uses $#universe-size$ elements: $S_0 = {0}$, $S_1 = {0, 1}$, $S_2 = {1}$. + + #pred-commands( + "pred create --example MinimumIntersectionGraphBasis -o intersection-basis.json", + "pred solve intersection-basis.json", + "pred evaluate intersection-basis.json --config " + x.optimal_config.map(str).join(","), + ) + ] + ] +} #{ let x = load-model-example("MaximumMatching") let nv = graph-num-vertices(x.instance) @@ -2834,6 +2974,34 @@ A classical NP-complete problem from Garey and Johnson @garey1979[Ch.~3, p.~76], ] } +#{ + let tdm = load-model-example("ThreeDimensionalMatching") + let q = tdm.instance.universe_size + let triples = tdm.instance.triples + let m = triples.len() + let sol = tdm.optimal_config + // Format a triple as (w+1, x+1, y+1) using 1-indexed notation + let fmt-triple(t) = "$(" + str(t.at(0) + 1) + ", " + str(t.at(1) + 1) + ", " + str(t.at(2) + 1) + ")$" + // Collect indices of selected triples (0-indexed) + let selected = sol.enumerate().filter(((i, v)) => v == 1).map(((i, _)) => i) + + [ + #problem-def("ThreeDimensionalMatching")[ + Given disjoint sets $W$, $X$, $Y$ each with $q$ elements and a set $M subset.eq W times X times Y$ of triples, does $M$ contain a _matching_ — a subset $M' subset.eq M$ with $|M'| = q$ such that no two triples in $M'$ agree in any coordinate? + ][ + Shown NP-complete by Karp (1972) @karp1972. Three-Dimensional Matching (3DM) is one of the six basic NP-complete problems in Garey & Johnson (A3 SP1) and is closely related to Exact Cover by 3-Sets. While X3C asks for a perfect partition of a single universe into disjoint triples, 3DM asks for a system of distinct representatives across three separate dimensions. The problem remains NP-complete even when each element appears in at most three triples. The direct brute-force algorithm runs in $O^*(2^m)$ time where $m = |M|$. + + *Example.* Let $W = X = Y = {1, 2, dots, #q}$ and $M = {t_1, dots, t_#m}$ with #triples.enumerate().map(((i, t)) => $t_#(i + 1) = #fmt-triple(t)$).join(", "). A valid matching is $M' = {#selected.map(i => $t_#(i + 1)$).join(", ")}$: the $W$-coordinates, $X$-coordinates, and $Y$-coordinates are each pairwise distinct, and $|M'| = #selected.len() = q$. + + #pred-commands( + "pred create --example ThreeDimensionalMatching -o three-dimensional-matching.json", + "pred solve three-dimensional-matching.json", + "pred evaluate three-dimensional-matching.json --config " + tdm.optimal_config.map(str).join(","), + ) + ] + ] +} + #{ let x = load-model-example("ComparativeContainment") let n = x.instance.universe_size @@ -3435,6 +3603,30 @@ A classical NP-complete problem from Garey and Johnson @garey1979[Ch.~3, p.~76], ] } +#{ + let x = load-model-example("AlgebraicEquationsOverGF2") + let n = x.instance.num_variables + let eqs = x.instance.equations + let config = x.optimal_config + [ + #problem-def("AlgebraicEquationsOverGF2")[ + Given $n$ Boolean variables $x_0, dots, x_(n-1)$ and $m$ multilinear polynomials $p_1, dots, p_m$ over $"GF"(2)$, determine whether there exists an assignment $(x_0, dots, x_(n-1)) in {0,1}^n$ such that $p_j (x_0, dots, x_(n-1)) = 0$ for all $j = 1, dots, m$. + + Each polynomial is a sum (XOR) of monomials over $"GF"(2)$. Each monomial is a product (AND) of a subset of the variables; the empty product denotes the constant $1$. + ][ + Algebraic Equations over $"GF"(2)$ generalises systems of linear equations over $"GF"(2)$ by allowing higher-degree monomials. The problem is NP-complete in general @garey1979. When restricted to degree $<= 1$ (linear equations only), the system reduces to Gaussian elimination over $"GF"(2)$ and is solvable in polynomial time. + + *Example.* Let $n = #n$ with #(eqs.len()) equations. The assignment $(#(config.map(str).join(", ")))$ satisfies all equations. + + #pred-commands( + "pred create --example AlgebraicEquationsOverGF2 -o agf2.json", + "pred solve agf2.json --solver brute-force", + "pred evaluate agf2.json --config " + config.map(str).join(","), + ) + ] + ] +} + #{ let x = load-model-example("QuadraticCongruences") let a = x.instance.a @@ -3795,6 +3987,89 @@ A classical NP-complete problem from Garey and Johnson @garey1979[Ch.~3, p.~76], ] } +#{ + let x = load-model-example("Planar3Satisfiability") + let n = x.instance.num_vars + let m = x.instance.clauses.len() + let clauses = x.instance.clauses + let sol = (config: x.optimal_config, metric: x.optimal_value) + let assign = sol.config + let fmt-lit(l) = if l > 0 { $x_#l$ } else { $not x_#(-l)$ } + let fmt-clause(c) = $paren.l #c.literals.map(fmt-lit).join($or$) paren.r$ + let eval-lit(l) = if l > 0 { assign.at(l - 1) } else { 1 - assign.at(-l - 1) } + [ + #problem-def("Planar3Satisfiability")[ + Given a 3-CNF formula $phi = and.big_(j=1)^m C_j$ with $m$ clauses over $n$ Boolean variables, where each clause $C_j$ contains exactly 3 literals, and the variable-clause incidence graph $H(phi)$ is planar, find a satisfying assignment $bold(x) in {0, 1}^n$. + ][ + Planar 3-SAT is a restricted variant of 3-SAT introduced by Lichtenstein @lichtenstein1982, who proved it NP-complete. The incidence graph $H(phi)$ is bipartite with variable nodes and clause nodes, connected by edges when a variable appears in a clause. Requiring $H(phi)$ to be planar is a strong structural constraint that enables reductions to geometric and planar problems (e.g., rectilinear Steiner tree, planar vertex cover). The best known algorithm shares the 3-SAT bound of $O^*(1.307^n)$ via biased-PPSZ @hansen2019, since any Planar 3-SAT instance is also a valid 3-SAT instance. + + *Example.* Consider $phi = #clauses.map(fmt-clause).join($and$)$ with $n = #n$ variables and $m = #m$ clauses. The assignment $(#range(n).map(i => $x_#(i + 1)$).join(",")) = (#assign.map(v => str(v)).join(", "))$ satisfies all clauses: #clauses.enumerate().map(((j, c)) => $C_#(j + 1) = paren.l #c.literals.map(l => str(eval-lit(l))).join($or$) paren.r = 1$).join(", "). + + #pred-commands( + "pred create --example Planar3Satisfiability -o planar3sat.json", + "pred solve planar3sat.json", + "pred evaluate planar3sat.json --config " + x.optimal_config.map(str).join(","), + ) + ] + ] +} + +#{ + let x = load-model-example("OneInThreeSatisfiability") + let n = x.instance.num_vars + let m = x.instance.clauses.len() + let clauses = x.instance.clauses + let sol = (config: x.optimal_config, metric: x.optimal_value) + let assign = sol.config + let fmt-lit(l) = if l > 0 { $x_#l$ } else { $not x_#(-l)$ } + let fmt-clause(c) = $paren.l #c.literals.map(fmt-lit).join($or$) paren.r$ + let eval-lit(l) = if l > 0 { assign.at(l - 1) } else { 1 - assign.at(-l - 1) } + let count-true(c) = c.literals.map(eval-lit).sum() + [ + #problem-def("OneInThreeSatisfiability")[ + Given a CNF formula $phi = and.big_(j=1)^m C_j$ with $m$ clauses over $n$ Boolean variables, where each clause $C_j$ contains exactly 3 literals, find a truth assignment $bold(x) in {0, 1}^n$ such that each clause has _exactly one_ true literal. + ][ + One-in-Three Satisfiability (1-in-3 SAT) was introduced by Schaefer @schaefer1978 as part of his dichotomy theorem for generalized satisfiability. Unlike standard 3-SAT which requires at least one true literal per clause, 1-in-3 SAT requires exactly one. The problem is NP-complete even for monotone instances (no negations). The best known algorithm runs in $O^*(1.307^n)$ time via biased-PPSZ @hansen2019, since every 1-in-3 SAT instance reduces trivially to 3-SAT. + + *Example.* Consider $phi = #clauses.map(fmt-clause).join($and$)$ with $n = #n$ variables and $m = #m$ clauses. The assignment $(#range(n).map(i => $x_#(i + 1)$).join(",")) = (#assign.map(v => str(v)).join(", "))$ satisfies the 1-in-3 condition: #clauses.enumerate().map(((j, c)) => $C_#(j + 1)$+ " has " + str(count-true(c)) + " true literal").join(", "). + + #pred-commands( + "pred create --example OneInThreeSatisfiability -o 1in3sat.json", + "pred solve 1in3sat.json", + "pred evaluate 1in3sat.json --config " + x.optimal_config.map(str).join(","), + ) + ] + ] +} + +#{ + let x = load-model-example("NonTautology") + let n = x.instance.num_vars + let m = x.instance.disjuncts.len() + let disjuncts = x.instance.disjuncts + let sol = (config: x.optimal_config, metric: x.optimal_value) + let assign = sol.config + let fmt-lit(l) = if l > 0 { $x_#l$ } else { $not x_#(-l)$ } + let fmt-disjunct(d) = $paren.l #d.map(fmt-lit).join($and$) paren.r$ + let eval-lit(l) = if l > 0 { assign.at(l - 1) } else { 1 - assign.at(-l - 1) } + let disjunct-true(d) = d.map(eval-lit).all(v => v == 1) + [ + #problem-def("NonTautology")[ + Given a Boolean formula in DNF $phi = or.big_(j=1)^m D_j$ with $m$ disjuncts over $n$ Boolean variables, where each disjunct $D_j$ is a conjunction of literals, find a truth assignment $bold(x) in {0, 1}^n$ such that $phi(bold(x)) = 0$ (i.e., every disjunct is false). + ][ + The Non-Tautology problem asks whether a given DNF formula is _not_ a tautology, by finding a falsifying assignment. A disjunct $D_j = ell_1 and dots and ell_k$ is false when at least one of its literals evaluates to false; the formula is false when all disjuncts are false. The problem is coNP-complete in general and closely related to SAT through De Morgan duality: a DNF formula $phi$ is a tautology iff $not phi$ (a CNF formula) is unsatisfiable. + + *Example.* Consider $phi = #disjuncts.map(fmt-disjunct).join($or$)$ with $n = #n$ variables and $m = #m$ disjuncts. The assignment $(#range(n).map(i => $x_#(i + 1)$).join(",")) = (#assign.map(v => str(v)).join(", "))$ falsifies the formula: #disjuncts.enumerate().map(((j, d)) => $D_#(j + 1)$+ " is " + if disjunct-true(d) { "true" } else { "false" }).join(", "). + + #pred-commands( + "pred create --example NonTautology -o nontaut.json", + "pred solve nontaut.json", + "pred evaluate nontaut.json --config " + x.optimal_config.map(str).join(","), + ) + ] + ] +} + #{ let x = load-model-example("CircuitSAT") let vars = x.instance.variables @@ -4350,6 +4625,132 @@ A classical NP-complete problem from Garey and Johnson @garey1979[Ch.~3, p.~76], ] } +#{ + let x = load-model-example("DegreeConstrainedSpanningTree") + let nv = graph-num-vertices(x.instance) + let ne = graph-num-edges(x.instance) + let edges = x.instance.graph.edges + let K = x.instance.max_degree + let sol = (config: x.optimal_config, metric: x.optimal_value) + let selected-edges = sol.config.enumerate().filter(((i, v)) => v == 1).map(((i, _)) => i) + [ + #problem-def("DegreeConstrainedSpanningTree")[ + Given an undirected graph $G = (V, E)$ and a positive integer $K$, determine whether $G$ contains a spanning tree $T$ in which every vertex has degree at most $K$. + ][ + Degree-Constrained Spanning Tree is NP-complete @garey1979[ND1]. The problem generalises the Hamiltonian Path problem (set $K = 2$). The best known exact algorithm uses brute-force enumeration in $O^*(2^n)$ time, where $n = |V|$. + + *Example.* Consider $G$ with $n = #nv$ vertices and $m = #ne$ edges #edges.map(((u, v)) => [${#u, #v}$]).join(", "). With $K = #K$, the spanning tree using edges #selected-edges.map(i => [${#edges.at(i).at(0), #edges.at(i).at(1)}$]).join(", ") has maximum vertex degree $<= #K$. + + #pred-commands( + "pred create --example DegreeConstrainedSpanningTree -o dcst.json", + "pred solve dcst.json", + "pred evaluate dcst.json --config " + x.optimal_config.map(str).join(","), + ) + ] + ] +} + +#{ + let x = load-model-example("BoundedDiameterSpanningTree") + let nv = graph-num-vertices(x.instance) + let ne = graph-num-edges(x.instance) + let edges = x.instance.graph.edges + let B = x.instance.weight_bound + let D = x.instance.diameter_bound + let ew = x.instance.edge_weights + let sol = (config: x.optimal_config, metric: x.optimal_value) + let selected-edges = sol.config.enumerate().filter(((i, v)) => v == 1).map(((i, _)) => i) + let total-weight = selected-edges.map(i => ew.at(i)).sum() + [ + #problem-def("BoundedDiameterSpanningTree")[ + Given an undirected graph $G = (V, E)$ with positive edge weights $w: E -> ZZ_(> 0)$, a weight bound $B$, and a diameter bound $D$, determine whether $G$ contains a spanning tree $T$ such that the total weight $sum_(e in T) w(e) <= B$ and the diameter of $T$ (the longest shortest path in number of edges) is at most $D$. + ][ + Bounded Diameter Spanning Tree is NP-hard @garey1979[ND3]. The problem asks for a spanning tree that simultaneously satisfies a budget constraint on total edge weight and a structural constraint on tree diameter. The best known exact algorithm uses brute-force enumeration in $O^*(n^n)$ time, where $n = |V|$. + + *Example.* Consider $G$ with $n = #nv$ vertices and $m = #ne$ edges #edges.map(((u, v)) => [${#u, #v}$]).join(", ") with edge weights #edges.enumerate().map(((i, (u, v))) => [$w({#u, #v}) = #(ew.at(i))$]).join(", "). With $B = #B$ and $D = #D$, the spanning tree using edges #selected-edges.map(i => [${#edges.at(i).at(0), #edges.at(i).at(1)}$]).join(", ") has total weight $#total-weight <= #B$ and diameter $<= #D$. + + #pred-commands( + "pred create --example BoundedDiameterSpanningTree -o bdst.json", + "pred solve bdst.json", + "pred evaluate bdst.json --config " + x.optimal_config.map(str).join(","), + ) + ] + ] +} + +#{ + let x = load-model-example("MonochromaticTriangle") + let nv = graph-num-vertices(x.instance) + let ne = graph-num-edges(x.instance) + let edges = x.instance.graph.edges + let sol = (config: x.optimal_config, metric: x.optimal_value) + [ + #problem-def("MonochromaticTriangle")[ + Given an undirected graph $G = (V, E)$, determine whether the edges of $G$ can be 2-colored (each edge assigned color 0 or 1) so that no triangle is monochromatic — that is, for every three mutually adjacent vertices $u, v, w$, the three edges ${u, v}$, ${u, w}$, ${v, w}$ do not all receive the same color. + ][ + Monochromatic Triangle is closely related to Ramsey theory. By the classical result $R(3, 3) = 6$, the complete graph $K_6$ admits no valid 2-coloring, while $K_5$ does. The problem is NP-complete in general @garey1979[GT6]. The best known exact algorithm uses brute-force enumeration in $O^*(2^m)$ time, where $m = |E|$. + + *Example.* Consider $K_4$ with $n = #nv$ vertices and $m = #ne$ edges. A valid coloring assigns colors #sol.config.enumerate().map(((i, c)) => [edge #i $arrow.r$ #c]).join(", "), so that no triangle has all three edges the same color. + + #pred-commands( + "pred create --example MonochromaticTriangle -o monochromatic-triangle.json", + "pred solve monochromatic-triangle.json", + "pred evaluate monochromatic-triangle.json --config " + x.optimal_config.map(str).join(","), + ) + ] + ] +} + +#{ + let x = load-model-example("PartitionIntoCliques") + let nv = graph-num-vertices(x.instance) + let ne = graph-num-edges(x.instance) + let edges = x.instance.graph.edges + let K = x.instance.num_cliques + let sol = (config: x.optimal_config, metric: x.optimal_value) + let groups = range(K).map(c => sol.config.enumerate().filter(((i, v)) => v == c).map(((i, _)) => i)) + [ + #problem-def("PartitionIntoCliques")[ + Given an undirected graph $G = (V, E)$ and a positive integer $K <= |V|$, determine whether the vertex set $V$ can be partitioned into $k <= K$ groups $V_1, dots, V_k$ such that each group $V_i$ induces a complete subgraph (clique) in $G$. + ][ + Partition Into Cliques is NP-complete @garey1979[GT15]. The problem is the complement of Graph Coloring: a valid clique cover of $G$ corresponds to a valid coloring of the complement graph $overline(G)$. The best known exact algorithm uses brute-force enumeration in $O^*(2^n)$ time. + + *Example.* Consider $G$ with $n = #nv$ vertices and edges #edges.map(((u, v)) => [${#u, #v}$]).join(", "). With $K = #K$, the partition #groups.enumerate().filter(((_, c)) => c.len() > 0).map(((i, c)) => $V_#(i + 1) = {#c.map(v => $v_#v$).join(", ")}$).join(", ") is valid: each group induces a clique. + + #pred-commands( + "pred create --example PartitionIntoCliques -o partition-into-cliques.json", + "pred solve partition-into-cliques.json", + "pred evaluate partition-into-cliques.json --config " + x.optimal_config.map(str).join(","), + ) + ] + ] +} + +#{ + let x = load-model-example("PartitionIntoPerfectMatchings") + let nv = graph-num-vertices(x.instance) + let ne = graph-num-edges(x.instance) + let edges = x.instance.graph.edges + let K = x.instance.num_matchings + let sol = (config: x.optimal_config, metric: x.optimal_value) + let groups = range(K).map(c => sol.config.enumerate().filter(((i, v)) => v == c).map(((i, _)) => i)) + [ + #problem-def("PartitionIntoPerfectMatchings")[ + Given an undirected graph $G = (V, E)$ and a positive integer $K <= |V|$, determine whether the vertex set $V$ can be partitioned into $k <= K$ groups $V_1, dots, V_k$ such that the subgraph induced by each group $V_i$ is a perfect matching: every vertex in $V_i$ has exactly one neighbor within $V_i$. Empty groups are permitted. + ][ + Partition Into Perfect Matchings is NP-complete @garey1979[GT16]. The problem asks whether the edges of a graph can be decomposed into perfect matchings of vertex-induced subgraphs. The best known exact algorithm uses brute-force enumeration in $O^*(K^n)$ time. + + *Example.* Consider $G$ with $n = #nv$ vertices and edges #edges.map(((u, v)) => [${#u, #v}$]).join(", "). With $K = #K$, the partition #groups.enumerate().filter(((_, c)) => c.len() > 0).map(((i, c)) => $V_#(i + 1) = {#c.map(v => $v_#v$).join(", ")}$).join(", ") is valid: each group induces a perfect matching. + + #pred-commands( + "pred create --example PartitionIntoPerfectMatchings -o partition-into-perfect-matchings.json", + "pred solve partition-into-perfect-matchings.json", + "pred evaluate partition-into-perfect-matchings.json --config " + x.optimal_config.map(str).join(","), + ) + ] + ] +} + #{ let x = load-model-example("BinPacking") let sizes = x.instance.sizes @@ -4530,6 +4931,33 @@ A classical NP-complete problem from Garey and Johnson @garey1979[Ch.~3, p.~76], ] } +#{ + let x = load-model-example("FeasibleRegisterAssignment") + let n = x.instance.num_vertices + let arcs = x.instance.arcs + let K = x.instance.num_registers + let asgn = x.instance.assignment + let sigma = x.optimal_config + let order = range(n).map(pos => + range(n).find(v => sigma.at(v) == pos) + ) + [ + #problem-def("FeasibleRegisterAssignment")[ + Given a directed acyclic graph $G = (V, A)$ with $n = |V|$ vertices, where each arc $(v, u) in A$ means vertex $v$ depends on vertex $u$, $K$ registers, and a register assignment $f: V arrow {0, dots, K-1}$, determine whether there exists a topological evaluation ordering such that no register conflict arises: when a vertex $v$ is evaluated and placed in register $f(v)$, no other vertex currently occupying register $f(v)$ still has uncomputed dependents. + ][ + Feasible Register Assignment is NP-complete @bouchez2006. It is closely related to Register Sufficiency (SS19 in Garey & Johnson @garey1979), but here the register assignment is given and only the scheduling order is sought. + + *Example.* Let $n = #n$ vertices with arcs: #{arcs.map(a => $v_#(a.at(0)) arrow.r v_#(a.at(1))$).join(", ")}. Registers $K = #K$, assignment $f = (#asgn.map(r => str(r)).join(", "))$. The evaluation order $(#order.map(v => $v_#v$).join(", "))$ is feasible. + + #pred-commands( + "pred create --example FeasibleRegisterAssignment -o feasible-register-assignment.json", + "pred solve feasible-register-assignment.json --solver brute-force", + "pred evaluate feasible-register-assignment.json --config " + x.optimal_config.map(str).join(","), + ) + ] + ] +} + #{ let x = load-model-example("RegisterSufficiency") let n = x.instance.num_vertices @@ -4945,6 +5373,31 @@ A classical NP-complete problem from Garey and Johnson @garey1979[Ch.~3, p.~76], ] } +#{ + let x = load-model-example("SubsetProduct") + let sizes = x.instance.sizes + let target = x.instance.target + let n = sizes.len() + let config = x.optimal_config + let selected = range(n).filter(i => config.at(i) == 1) + let sel-sizes = selected.map(i => sizes.at(i)) + [ + #problem-def("SubsetProduct")[ + Given a finite set $A = {a_0, dots, a_(n-1)}$ with sizes $s(a_i) in ZZ^+$ and a target $B in ZZ^+$, determine whether there exists a subset $A' subset.eq A$ such that $product_(a in A') s(a) = B$. + ][ + The multiplicative analogue of Subset Sum. Instead of seeking a subset whose elements sum to a target, we seek one whose product equals the target. NP-complete; the best known exact algorithm is an $O^*(2^(n slash 2))$ meet-in-the-middle approach analogous to Horowitz--Sahni for Subset Sum. + + *Example.* Let $A = {#sizes.map(s => str(s)).join(", ")}$ ($n = #n$) and target $B = #target$. Selecting $A' = {#sel-sizes.map(s => str(s)).join(", ")}$ gives product $#sel-sizes.map(s => str(s)).join(" times ") = #target = B$. + + #pred-commands( + "pred create --example SubsetProduct -o subset-product.json", + "pred solve subset-product.json", + "pred evaluate subset-product.json --config " + x.optimal_config.map(str).join(","), + ) + ] + ] +} + #problem-def("ResourceConstrainedScheduling")[ Given a set $T$ of $n$ unit-length tasks, $m$ identical processors, $r$ resources with bounds $B_i$ ($1 <= i <= r$), resource requirements $R_i (t)$ for each task $t$ and resource $i$ ($0 <= R_i (t) <= B_i$), and an overall deadline $D in ZZ^+$, determine whether there exists an $m$-processor schedule $sigma : T -> {0, dots, D-1}$ such that for every time slot $u$, at most $m$ tasks are scheduled at $u$ and $sum_(t : sigma(t) = u) R_i (t) <= B_i$ for each resource $i$. ][ @@ -5095,6 +5548,130 @@ A classical NP-complete problem from Garey and Johnson @garey1979[Ch.~3, p.~76], ] } +#{ + let x = load-model-example("Numerical3DimensionalMatching") + let m = x.instance.sizes_w.len() + let sw = x.instance.sizes_w + let sx = x.instance.sizes_x + let sy = x.instance.sizes_y + let bound = x.instance.bound + let config = x.optimal_config + let x-perm = config.slice(0, m) + let y-perm = config.slice(m) + [ + #problem-def("Numerical3DimensionalMatching")[ + Given disjoint sets $W$, $X$, $Y$ each with $m$ elements, positive integer sizes $s(a)$ with $B\/4 < s(a) < B\/2$ for every element, and a bound $B in ZZ^+$ such that $sum s(a) = m B$, determine whether $W union X union Y$ can be partitioned into $m$ triples, each containing one element from $W$, $X$, and $Y$, with each triple summing to exactly $B$. + ][ + Numerical 3-Dimensional Matching is strongly NP-complete (SP16 in Garey and Johnson @garey1979). The strict size window $B\/4 < s(a) < B\/2$ forces every feasible triple to contain exactly one element from each set. The problem is a key intermediate in strong NP-completeness reductions to bin packing, scheduling, and layout problems. Brute-force enumeration runs in $O^*(m^(2m))$ time. + + *Example.* Let $m = #m$ and $B = #bound$. The sizes are $W = (#sw.map(str).join(", "))$, $X = (#sx.map(str).join(", "))$, $Y = (#sy.map(str).join(", "))$. The matching pairs each $w_i$ with $x_(pi(i))$ and $y_(sigma(i))$: #range(m).map(i => [$w_#i + x_#(x-perm.at(i)) + y_#(y-perm.at(i)) = #(sw.at(i) + sx.at(x-perm.at(i)) + sy.at(y-perm.at(i)))$]).join(", "), all equal to $B$. + + #pred-commands( + "pred create --example Numerical3DimensionalMatching -o n3dm.json", + "pred solve n3dm.json", + "pred evaluate n3dm.json --config " + config.map(str).join(","), + ) + ] + ] +} +#{ + let x = load-model-example("NonLivenessFreePetriNet") + let np = x.instance.num_places + let nt = x.instance.num_transitions + let config = x.optimal_config + [ + #problem-def("NonLivenessFreePetriNet")[ + Given a free-choice Petri net $P = (S, T, F, M_0)$ with $|S|$ places, $|T|$ transitions, flow relation $F$, and initial marking $M_0$, determine whether $P$ is _not live_: does there exist a transition $t in T$ and a marking $M$ reachable from $M_0$ such that $t$ can never fire again from $M$? + ][ + Non-Liveness of free-choice Petri nets is NP-complete (Garey and Johnson @garey1979). A Petri net is _free-choice_ if every two transitions sharing an input place have identical presets. The implementation explores the bounded reachability graph (capped at the initial token sum per place) and checks whether any transition becomes permanently dead. + + *Example.* A chain net with $#np$ places and $#nt$ transitions: $t_0$ moves a token from $s_0$ to $s_1$, $t_1$ from $s_1$ to $s_2$, $t_2$ from $s_2$ to $s_3$. Starting from $M_0 = (1, 0, 0, 0)$, after all transitions fire once the net reaches deadlock at $(0, 0, 0, 1)$ and all transitions are permanently dead. The witness configuration $(#config.map(str).join(", "))$ confirms all transitions are globally dead. + + #pred-commands( + "pred create --example NonLivenessFreePetriNet -o petri.json", + "pred solve petri.json --solver brute-force", + "pred evaluate petri.json --config " + config.map(str).join(","), + ) + ] + ] +} + +#{ + let x = load-model-example("Betweenness") + let n = x.instance.num_elements + let triples = x.instance.triples + let config = x.optimal_config + [ + #problem-def("Betweenness")[ + Given a finite set $A = {a_0, dots, a_(n-1)}$ of $n$ elements and a collection $C$ of ordered triples $(a, b, c)$, determine whether there exists a linear ordering $f: A -> {0, dots, n-1}$ (a bijection) such that for every $(a, b, c) in C$, either $f(a) < f(b) < f(c)$ or $f(c) < f(b) < f(a)$ — that is, $b$ is _between_ $a$ and $c$ in the ordering. + ][ + Betweenness is problem MS1 in Garey and Johnson @garey1979. It arises in seriation, archaeological sequencing, and DNA physical mapping. The problem is NP-complete even when restricted to dense constraint sets. The implementation represents a solution as a permutation $f$ where $f(i)$ is the position assigned to element $i$. + + *Example.* Consider $n = #n$ elements with triples #triples.map(t => [$(#t.at(0), #t.at(1), #t.at(2))$]).join(", "). The witness ordering $f = (#config.map(str).join(", "))$ (the identity permutation) satisfies all constraints: each middle element of every triple lies between the other two in the ordering. + + #pred-commands( + "pred create --example Betweenness -o betweenness.json", + "pred solve betweenness.json", + "pred evaluate betweenness.json --config " + config.map(str).join(","), + ) + ] + ] +} + +#{ + let x = load-model-example("CyclicOrdering") + let n = x.instance.num_elements + let triples = x.instance.triples + let config = x.optimal_config + [ + #problem-def("CyclicOrdering")[ + Given a finite set $A = {a_0, dots, a_(n-1)}$ of $n$ elements and a collection $C$ of ordered triples $(a, b, c)$, determine whether there exists a permutation $f: A -> {0, dots, n-1}$ (a bijection) such that for every $(a, b, c) in C$, the values $f(a)$, $f(b)$, $f(c)$ appear in cyclic order — i.e., $f(a) < f(b) < f(c)$ or $f(b) < f(c) < f(a)$ or $f(c) < f(a) < f(b)$. + ][ + Cyclic Ordering is problem MS2 in Garey and Johnson @garey1979. It is closely related to Betweenness (MS1) but enforces a cyclic rather than linear ordering constraint. The problem is NP-complete. The implementation represents a solution as a permutation $f$ where $f(i)$ is the position assigned to element $i$. + + *Example.* Consider $n = #n$ elements with triples #triples.map(t => [$(#t.at(0), #t.at(1), #t.at(2))$]).join(", "). The witness ordering $f = (#config.map(str).join(", "))$ satisfies all constraints: each triple's elements appear in cyclic order under $f$. + + #pred-commands( + "pred create --example CyclicOrdering -o cyclic_ordering.json", + "pred solve cyclic_ordering.json", + "pred evaluate cyclic_ordering.json --config " + config.map(str).join(","), + ) + ] + ] +} + +#{ + let x = load-model-example("DynamicStorageAllocation") + let items = x.instance.items + let D = x.instance.memory_size + let config = x.optimal_config + let n = items.len() + [ + #problem-def("DynamicStorageAllocation")[ + Given $n$ items, each with arrival time $r(a)$, departure time $d(a)$, and size $s(a)$, and a storage size $D$, determine whether there exists a starting address $sigma(a) in {0, dots, D - s(a)}$ for each item $a$ such that for every pair of items $a, a'$ with overlapping time intervals ($r(a) < d(a')$ and $r(a') < d(a)$), the memory intervals $[sigma(a), sigma(a) + s(a) - 1]$ and $[sigma(a'), sigma(a') + s(a') - 1]$ are disjoint. + ][ + Dynamic Storage Allocation is Garey and Johnson's SR2 @garey1979 and models memory allocation for processes with known lifetimes. It generalises strip-packing and bin-packing with time constraints. The implementation encodes each item's starting address as a single variable with domain $D - s(a) + 1$. + + *Example.* Let $D = #D$ and consider #n items with $(r, d, s)$ tuples #items.map(t => $(#t.at(0), #t.at(1), #t.at(2))$).join(", "). The witness assignment $sigma = (#config.map(str).join(", "))$ places every item within $[0, #(D - 1)]$ and ensures no two time-overlapping items share memory cells. + + #pred-commands( + "pred create --example DynamicStorageAllocation -o dynamic-storage-allocation.json", + "pred solve dynamic-storage-allocation.json", + "pred evaluate dynamic-storage-allocation.json --config " + config.map(str).join(","), + ) + + #align(center, table( + columns: 5, + align: center, + table.header([Item], [Arrival], [Departure], [Size], [$sigma$]), + ..items.enumerate().map(((i, t)) => ( + [$a_#i$], [$#t.at(0)$], [$#t.at(1)$], [$#t.at(2)$], [$#(config.at(i))$], + )).flatten(), + )) + ] + ] +} + #{ let x = load-model-example("KthLargestMTuple") let sets = x.instance.sets @@ -7583,6 +8160,56 @@ A classical NP-complete problem from Garey and Johnson @garey1979[Ch.~3, p.~76], ] } +#{ + let x = load-model-example("MinimumMatrixDomination") + let M = x.instance.matrix + let m = M.len() + let n = if m > 0 { M.at(0).len() } else { 0 } + let ones = x.instance.ones + let cfg = x.optimal_config + let selected-ones = ones.enumerate().filter(((k, _)) => cfg.at(k) == 1).map(((_, pos)) => pos) + [ + #problem-def("MinimumMatrixDomination")[ + Given an $m times n$ binary matrix $M$, find a minimum-cardinality subset $C$ of 1-entries such that every 1-entry not in $C$ shares a row or column with some entry in $C$. + ][ + Minimum Matrix Domination is a matrix analogue of the dominating set problem. Each binary variable corresponds to a 1-entry in row-major order; the evaluator checks that every unselected 1-entry shares a row or column with at least one selected entry. The brute-force complexity is $O(2^k)$ where $k$ is the number of 1-entries. + + *Example.* Let $M$ be the #(m)$times$#(n) adjacency matrix of $P_6$ (the path on 6 vertices), which has #(ones.len()) non-zero entries. The optimal config $(#cfg.map(str).join(", "))$ selects entries at positions #selected-ones.map(((r, c)) => [(#r, #c)]).join(", "), yielding value $= #x.optimal_value$. + + #pred-commands( + "pred create --example " + problem-spec(x) + " -o mmd.json", + "pred solve mmd.json", + "pred evaluate mmd.json --config " + x.optimal_config.map(str).join(","), + ) + ] + ] +} + +#{ + let x = load-model-example("MinimumWeightSolutionToLinearEquations") + let A = x.instance.matrix + let b = x.instance.rhs + let n = A.len() + let m = if n > 0 { A.at(0).len() } else { 0 } + let cfg = x.optimal_config + let selected-cols = cfg.enumerate().filter(((_, v)) => v == 1).map(((j, _)) => j) + [ + #problem-def("MinimumWeightSolutionToLinearEquations")[ + Given an $n times m$ integer matrix $A$ and an integer vector $b in ZZ^n$, find a rational vector $y in QQ^m$ satisfying $A y = b$ that minimizes $||y||_0$ (the number of non-zero entries of $y$). + ][ + Minimum Weight Solution to Linear Equations is a sparsity-seeking variant of solving linear systems. Each binary variable $x_j$ indicates whether the $j$-th component of $y$ may be non-zero; the evaluator forms the restricted submatrix $A'$ from the selected columns and checks whether $b$ lies in its column space via integer Gaussian elimination (using i128 arithmetic for exact rational consistency). If the restricted system $A' y' = b$ is consistent, the value is the number of selected columns; otherwise the configuration is infeasible. + + *Example.* Let $A$ be the #(n)$times$#(m) matrix $mat(#A.at(0).map(str).join(", "); #A.at(1).map(str).join(", "))$ with $b = (#b.map(str).join(", "))$. The optimal config $(#cfg.map(str).join(", "))$ selects columns #selected-cols.map(str).join(", "), yielding value $= #x.optimal_value$. + + #pred-commands( + "pred create --example " + problem-spec(x) + " -o mwsle.json", + "pred solve mwsle.json", + "pred evaluate mwsle.json --config " + x.optimal_config.map(str).join(","), + ) + ] + ] +} + #{ let x = load-model-example("IntegerExpressionMembership") [ diff --git a/docs/paper/references.bib b/docs/paper/references.bib index 94fd2150..aa3c5f81 100644 --- a/docs/paper/references.bib +++ b/docs/paper/references.bib @@ -1,3 +1,15 @@ +@inproceedings{bouchez2006, + author = {Florent Bouchez and Alain Darte and Christophe Guillon and Fabrice Rastello}, + title = {Register Allocation: What Does the {NP}-Completeness Proof of {Chaitin} et al. Really Prove?}, + booktitle = {Languages and Compilers for Parallel Computing (LCPC)}, + series = {LNCS}, + volume = {4382}, + pages = {283--298}, + year = {2006}, + publisher = {Springer}, + doi = {10.1007/978-3-540-72521-3_21} +} + @article{sethi1975, author = {Ravi Sethi}, title = {Complete Register Allocation Problems}, @@ -658,6 +670,17 @@ @article{aspvall1979 doi = {10.1016/0020-0190(79)90002-4} } +@article{lichtenstein1982, + author = {David Lichtenstein}, + title = {Planar Formulae and Their Uses}, + journal = {SIAM Journal on Computing}, + volume = {11}, + number = {2}, + pages = {329--343}, + year = {1982}, + doi = {10.1137/0211025} +} + @inproceedings{hansen2019, author = {Thomas Dueholm Hansen and Haim Kaplan and Or Zamir and Uri Zwick}, title = {Faster $k$-{SAT} Algorithms Using Biased-{PPSZ}}, diff --git a/problemreductions-cli/src/cli.rs b/problemreductions-cli/src/cli.rs index 6f21cc7e..14bbfdef 100644 --- a/problemreductions-cli/src/cli.rs +++ b/problemreductions-cli/src/cli.rs @@ -228,6 +228,7 @@ Flags by problem type: KColoring --graph, --k KClique --graph, --k MinimumMultiwayCut --graph, --terminals, --edge-weights + MonochromaticTriangle --graph PartitionIntoTriangles --graph GeneralizedHex --graph, --source, --sink IntegralFlowWithMultipliers --arcs, --capacities, --source, --sink, --multipliers, --requirement @@ -248,8 +249,13 @@ Flags by problem type: BinPacking --sizes, --capacity CapacityAssignment --capacities, --cost-matrix, --delay-matrix, --cost-budget, --delay-budget ProductionPlanning --num-periods, --demands, --capacities, --setup-costs, --production-costs, --inventory-costs, --cost-bound + SubsetProduct --sizes, --target SubsetSum --sizes, --target + Numerical3DimensionalMatching --w-sizes, --x-sizes, --y-sizes, --bound + Betweenness --n, --sets (triples a,b,c) + CyclicOrdering --n, --sets (triples a,b,c) ThreePartition --sizes, --bound + DynamicStorageAllocation --release-times, --deadlines, --sizes, --capacity KthLargestMTuple --sets, --k, --bound QuadraticCongruences --coeff-a, --coeff-b, --coeff-c QuadraticDiophantineEquations --coeff-a, --coeff-b, --coeff-c @@ -263,6 +269,7 @@ Flags by problem type: EnsembleComputation --universe, --sets, --budget ComparativeContainment --universe, --r-sets, --s-sets [--r-weights] [--s-weights] X3C (ExactCoverBy3Sets) --universe, --sets (3 elements each) + 3DM (ThreeDimensionalMatching) --universe, --sets (triples w,x,y) SetBasis --universe, --sets, --k MinimumCardinalityKey --num-attributes, --dependencies PrimeAttributeName --universe, --deps, --query @@ -325,9 +332,11 @@ Flags by problem type: StringToStringCorrection --source-string, --target-string, --bound [--alphabet-size] D2CIF --arcs, --capacities, --source-1, --sink-1, --source-2, --sink-2, --requirement-1, --requirement-2 MinimumDummyActivitiesPert --arcs [--num-vertices] + FeasibleRegisterAssignment --arcs, --assignment, --k [--num-vertices] RegisterSufficiency --arcs, --bound [--num-vertices] CBQ --domain-size, --relations, --conjuncts-spec IntegerExpressionMembership --expression (JSON), --target + MinimumGeometricConnectedDominatingSet --positions (float x,y pairs), --radius ILP, CircuitSAT (via reduction only) Geometry graph variants (use slash notation, e.g., MIS/KingsSubgraph): @@ -475,7 +484,7 @@ pub struct CreateArgs { /// Random seed for reproducibility #[arg(long)] pub seed: Option, - /// Target value (for Factoring and SubsetSum) + /// Target value (for Factoring, SubsetSum, and SubsetProduct) #[arg(long)] pub target: Option, /// Bits for first factor (for Factoring); also accepted as a processor-count alias for scheduling create commands @@ -595,6 +604,9 @@ pub struct CreateArgs { /// Upper bound on total path weight #[arg(long)] pub weight_bound: Option, + /// Upper bound on tree diameter (in edges) for BoundedDiameterSpanningTree + #[arg(long)] + pub diameter_bound: Option, /// Upper bound on total inter-partition arc cost #[arg(long)] pub cost_bound: Option, @@ -779,6 +791,12 @@ pub struct CreateArgs { /// Expression tree for IntegerExpressionMembership (JSON, e.g., '{"Sum":[{"Atom":1},{"Atom":2}]}') #[arg(long)] pub expression: Option, + /// Equations for AlgebraicEquationsOverGF2 (semicolon-separated polynomials, each a colon-separated list of monomials, each a comma-separated list of variable indices; empty monomial = constant 1; e.g., "0,1:2;1,2:0:;0:1:2:") + #[arg(long)] + pub equations: Option, + /// Register assignment for FeasibleRegisterAssignment (comma-separated register indices, e.g., "0,1,0,0") + #[arg(long)] + pub assignment: Option, /// Coefficient/parameter a for QuadraticCongruences (residue target) or QuadraticDiophantineEquations (coefficient of x²) #[arg(long)] pub coeff_a: Option, @@ -791,6 +809,21 @@ pub struct CreateArgs { /// Incongruence pairs for SimultaneousIncongruences (semicolon-separated "a,b" pairs, e.g., "2,2;1,3;2,5;3,7") #[arg(long)] pub pairs: Option, + /// W-set sizes for Numerical3DimensionalMatching (comma-separated, e.g., "4,5") + #[arg(long)] + pub w_sizes: Option, + /// X-set sizes for Numerical3DimensionalMatching (comma-separated, e.g., "4,5") + #[arg(long)] + pub x_sizes: Option, + /// Y-set sizes for Numerical3DimensionalMatching (comma-separated, e.g., "5,7") + #[arg(long)] + pub y_sizes: Option, + /// Initial marking for NonLivenessFreePetriNet (comma-separated tokens per place, e.g., "1,0,0,0") + #[arg(long)] + pub initial_marking: Option, + /// Output arcs (transition-to-place) for NonLivenessFreePetriNet (e.g., "0>1,1>2,2>3") + #[arg(long)] + pub output_arcs: Option, } #[derive(clap::Args)] diff --git a/problemreductions-cli/src/commands/create.rs b/problemreductions-cli/src/commands/create.rs index 4b49a489..cd3eaca4 100644 --- a/problemreductions-cli/src/commands/create.rs +++ b/problemreductions-cli/src/commands/create.rs @@ -8,34 +8,38 @@ use crate::util; use anyhow::{bail, Context, Result}; use problemreductions::export::{ModelExample, ProblemRef, ProblemSide, RuleExample}; use problemreductions::models::algebraic::{ - ClosestVectorProblem, ConsecutiveBlockMinimization, ConsecutiveOnesMatrixAugmentation, - ConsecutiveOnesSubmatrix, EquilibriumPoint, FeasibleBasisExtension, QuadraticCongruences, - QuadraticDiophantineEquations, SimultaneousIncongruences, SparseMatrixCompression, BMF, + AlgebraicEquationsOverGF2, ClosestVectorProblem, ConsecutiveBlockMinimization, + ConsecutiveOnesMatrixAugmentation, ConsecutiveOnesSubmatrix, EquilibriumPoint, + FeasibleBasisExtension, MinimumMatrixDomination, MinimumWeightSolutionToLinearEquations, + QuadraticCongruences, QuadraticDiophantineEquations, SimultaneousIncongruences, + SparseMatrixCompression, BMF, }; use problemreductions::models::formula::Quantifier; use problemreductions::models::graph::{ DirectedHamiltonianPath, DisjointConnectingPaths, GeneralizedHex, HamiltonianCircuit, - HamiltonianPath, HamiltonianPathBetweenTwoVertices, IntegralFlowBundles, + HamiltonianPath, HamiltonianPathBetweenTwoVertices, IntegralFlowBundles, Kernel, LengthBoundedDisjointPaths, LongestCircuit, LongestPath, MinimumCutIntoBoundedSets, - MinimumDummyActivitiesPert, MinimumMaximalMatching, MinimumMultiwayCut, MixedChinesePostman, - MultipleChoiceBranching, PathConstrainedNetworkFlow, RootedTreeArrangement, SteinerTree, - SteinerTreeInGraphs, StrongConnectivityAugmentation, + MinimumDummyActivitiesPert, MinimumGeometricConnectedDominatingSet, MinimumMaximalMatching, + MinimumMultiwayCut, MixedChinesePostman, MultipleChoiceBranching, PathConstrainedNetworkFlow, + RootedTreeArrangement, SteinerTree, SteinerTreeInGraphs, StrongConnectivityAugmentation, }; use problemreductions::models::misc::{ - AdditionalKey, BinPacking, BoyceCoddNormalFormViolation, CapacityAssignment, CbqRelation, - ConjunctiveBooleanQuery, ConsistencyOfDatabaseFrequencyTables, EnsembleComputation, - ExpectedRetrievalCost, FlowShopScheduling, FrequencyTable, GroupingBySwapping, IntExpr, + AdditionalKey, Betweenness, BinPacking, BoyceCoddNormalFormViolation, CapacityAssignment, + CbqRelation, ConjunctiveBooleanQuery, ConsistencyOfDatabaseFrequencyTables, CyclicOrdering, + DynamicStorageAllocation, EnsembleComputation, ExpectedRetrievalCost, + FeasibleRegisterAssignment, FlowShopScheduling, FrequencyTable, GroupingBySwapping, IntExpr, IntegerExpressionMembership, JobShopScheduling, KnownValue, KthLargestMTuple, LongestCommonSubsequence, MinimumExternalMacroDataCompression, MinimumInternalMacroDataCompression, MinimumTardinessSequencing, MultiprocessorScheduling, - PaintShop, PartiallyOrderedKnapsack, PreemptiveScheduling, ProductionPlanning, QueryArg, - RectilinearPictureCompression, RegisterSufficiency, ResourceConstrainedScheduling, - SchedulingToMinimizeWeightedCompletionTime, SchedulingWithIndividualDeadlines, - SequencingToMinimizeMaximumCumulativeCost, SequencingToMinimizeTardyTaskWeight, - SequencingToMinimizeWeightedCompletionTime, SequencingToMinimizeWeightedTardiness, - SequencingWithDeadlinesAndSetUpTimes, SequencingWithReleaseTimesAndDeadlines, - SequencingWithinIntervals, ShortestCommonSupersequence, StringToStringCorrection, SubsetSum, - SumOfSquaresPartition, ThreePartition, TimetableDesign, + NonLivenessFreePetriNet, Numerical3DimensionalMatching, PaintShop, PartiallyOrderedKnapsack, + PreemptiveScheduling, ProductionPlanning, QueryArg, RectilinearPictureCompression, + RegisterSufficiency, ResourceConstrainedScheduling, SchedulingToMinimizeWeightedCompletionTime, + SchedulingWithIndividualDeadlines, SequencingToMinimizeMaximumCumulativeCost, + SequencingToMinimizeTardyTaskWeight, SequencingToMinimizeWeightedCompletionTime, + SequencingToMinimizeWeightedTardiness, SequencingWithDeadlinesAndSetUpTimes, + SequencingWithReleaseTimesAndDeadlines, SequencingWithinIntervals, ShortestCommonSupersequence, + StringToStringCorrection, SubsetProduct, SubsetSum, SumOfSquaresPartition, ThreePartition, + TimetableDesign, }; use problemreductions::models::BiconnectivityAugmentation; use problemreductions::prelude::*; @@ -131,6 +135,7 @@ fn all_data_flags_empty(args: &CreateArgs) -> bool { && args.latency_bound.is_none() && args.length_bound.is_none() && args.weight_bound.is_none() + && args.diameter_bound.is_none() && args.cost_bound.is_none() && args.delay_budget.is_none() && args.pattern.is_none() @@ -202,6 +207,7 @@ fn all_data_flags_empty(args: &CreateArgs) -> bool { && args.expression.is_none() && args.deps.is_none() && args.query.is_none() + && args.equations.is_none() && args.coeff_a.is_none() && args.coeff_b.is_none() && args.rhs.is_none() @@ -210,6 +216,12 @@ fn all_data_flags_empty(args: &CreateArgs) -> bool { && args.required_columns.is_none() && args.compilers.is_none() && args.setup_times.is_none() + && args.w_sizes.is_none() + && args.x_sizes.is_none() + && args.y_sizes.is_none() + && args.assignment.is_none() + && args.initial_marking.is_none() + && args.output_arcs.is_none() } fn emit_problem_output(output: &ProblemJsonOutput, out: &OutputConfig) -> Result<()> { @@ -643,6 +655,9 @@ fn example_for(canonical: &str, graph_type: Option<&str>) -> &'static str { "--arcs \"0>1,0>2,1>3,1>4,2>4,3>5,4>5,4>6,5>7,6>7\" --capacities 2,1,1,1,1,1,1,1,2,1 --source 0 --sink 7 --paths \"0,2,5,8;0,3,6,8;0,3,7,9;1,4,6,8;1,4,7,9\" --requirement 3" } "IsomorphicSpanningTree" => "--graph 0-1,1-2,0-2 --tree 0-1,1-2", + "BoundedDiameterSpanningTree" => { + "--graph 0-1,0-2,0-3,1-2,1-4,2-3,3-4 --edge-weights 1,2,1,1,2,1,1 --weight-bound 5 --diameter-bound 3" + } "KthBestSpanningTree" => "--graph 0-1,0-2,1-2 --edge-weights 2,3,1 --k 1 --bound 3", "LongestCircuit" => { "--graph 0-1,1-2,2-3,3-4,4-5,5-0,0-3,1-4,2-5,3-5 --edge-weights 3,2,4,1,5,2,3,2,1,2" @@ -666,6 +681,15 @@ fn example_for(canonical: &str, graph_type: Option<&str>) -> &'static str { "--num-vars 3 --clauses \"1,2;-1,3\" --quantifiers \"E,A,E\"" } "KSatisfiability" => "--num-vars 3 --clauses \"1,2,3;-1,2,-3\" --k 3", + "NonTautology" => { + "--num-vars 3 --clauses \"1,2,3;-1,-2,-3\"" + } + "OneInThreeSatisfiability" => { + "--num-vars 4 --clauses \"1,2,3;-1,3,4;2,-3,-4\"" + } + "Planar3Satisfiability" => { + "--num-vars 4 --clauses \"1,2,3;-1,2,4;1,-3,4;-2,3,-4\"" + } "QUBO" => "--matrix \"1,0.5;0.5,2\"", "QuadraticAssignment" => "--matrix \"0,5;5,0\" --distance-matrix \"0,1;1,0\"", "SpinGlass" => "--graph 0-1,1-2 --couplings 1,1", @@ -682,9 +706,16 @@ fn example_for(canonical: &str, graph_type: Option<&str>) -> &'static str { "BalancedCompleteBipartiteSubgraph" => { "--left 4 --right 4 --biedges 0-0,0-1,0-2,1-0,1-1,1-2,2-0,2-1,2-2,3-0,3-1,3-3 --k 3" } + "MaximumAchromaticNumber" => "--graph 0-1,1-2,2-3,3-4,4-5,5-0", + "MinimumCoveringByCliques" => "--graph 0-1,1-2,0-2,2-3", + "MinimumIntersectionGraphBasis" => "--graph 0-1,1-2", "MinimumMaximalMatching" => "--graph 0-1,1-2,2-3,3-4,4-5", + "DegreeConstrainedSpanningTree" => "--graph 0-1,0-2,0-3,1-2,1-4,2-3,3-4 --k 2", + "MonochromaticTriangle" => "--graph 0-1,0-2,0-3,1-2,1-3,2-3", "PartitionIntoTriangles" => "--graph 0-1,1-2,0-2", + "PartitionIntoCliques" => "--graph 0-1,0-2,1-2,3-4,3-5,4-5 --k 3", "PartitionIntoForests" => "--graph 0-1,1-2,2-0,3-4,4-5,5-3 --k 2", + "PartitionIntoPerfectMatchings" => "--graph 0-1,2-3,0-2,1-3 --k 2", "Factoring" => "--target 15 --m 4 --n 4", "CapacityAssignment" => { "--capacities 1,2,3 --cost-matrix \"1,3,6;2,4,7;1,2,5\" --delay-matrix \"8,4,1;7,3,1;6,3,1\" --delay-budget 12" @@ -727,7 +758,14 @@ fn example_for(canonical: &str, graph_type: Option<&str>) -> &'static str { "DirectedHamiltonianPath" => { "--arcs \"0>1,0>3,1>3,1>4,2>0,2>4,3>2,3>5,4>5,5>1\" --num-vertices 6" } + "Kernel" => "--arcs \"0>1,0>2,1>3,2>3,3>4,4>0,4>1\"", + "MinimumGeometricConnectedDominatingSet" => { + "--positions \"0,0;3,0;6,0;9,0;0,3;3,3;6,3;9,3\" --radius 3.5" + } "MinimumDummyActivitiesPert" => "--arcs \"0>2,0>3,1>3,1>4,2>5\" --num-vertices 6", + "FeasibleRegisterAssignment" => { + "--arcs \"0>1,0>2,1>3\" --assignment 0,1,0,0 --k 2 --num-vertices 4" + } "RegisterSufficiency" => { "--arcs \"2>0,2>1,3>1,4>2,4>3,5>0,6>4,6>5\" --bound 3 --num-vertices 7" } @@ -758,12 +796,21 @@ fn example_for(canonical: &str, graph_type: Option<&str>) -> &'static str { "--sizes 3,4,2,5,3 --weights 2,3,1,4,2 --deadlines 5,8,4,15,10 --bound 13" } "IntegerKnapsack" => "--sizes 3,4,5,2,7 --values 4,5,7,3,9 --capacity 15", + "SubsetProduct" => "--sizes 2,3,5,7,6,10 --target 210", "SubsetSum" => "--sizes 3,7,1,8,2,4 --target 11", "IntegerExpressionMembership" => { "--expression '{\"Sum\":[{\"Sum\":[{\"Union\":[{\"Atom\":1},{\"Atom\":4}]},{\"Union\":[{\"Atom\":3},{\"Atom\":6}]}]},{\"Union\":[{\"Atom\":2},{\"Atom\":5}]}]}' --target 12" } + "NonLivenessFreePetriNet" => { + "--n 4 --m 3 --arcs \"0>0,1>1,2>2\" --output-arcs \"0>1,1>2,2>3\" --initial-marking 1,0,0,0" + } + "Betweenness" => "--n 5 --sets \"0,1,2;2,3,4;0,2,4;1,3,4\"", + "CyclicOrdering" => "--n 5 --sets \"0,1,2;2,3,0;1,3,4\"", + "Numerical3DimensionalMatching" => "--w-sizes 4,5 --x-sizes 4,5 --y-sizes 5,7 --bound 15", "ThreePartition" => "--sizes 4,5,6,4,6,5 --bound 15", + "DynamicStorageAllocation" => "--release-times 0,0,1,2,3 --deadlines 3,2,4,5,5 --sizes 2,3,1,3,2 --capacity 6", "KthLargestMTuple" => "--sets \"2,5,8;3,6;1,4,7\" --k 14 --bound 12", + "AlgebraicEquationsOverGF2" => "--num-vars 3 --equations \"0,1:2;1,2:0:;0:1:2:\"", "QuadraticCongruences" => "--coeff-a 4 --coeff-b 15 --coeff-c 10", "QuadraticDiophantineEquations" => "--coeff-a 3 --coeff-b 5 --coeff-c 53", "SimultaneousIncongruences" => "--pairs \"2,2;1,3;2,5;3,7\"", @@ -802,6 +849,10 @@ fn example_for(canonical: &str, graph_type: Option<&str>) -> &'static str { "SparseMatrixCompression" => { "--matrix \"1,0,0,1;0,1,0,0;0,0,1,0;1,0,0,0\" --bound 2" } + "MinimumMatrixDomination" => "--matrix \"0,1,0;1,0,1;0,1,0\"", + "MinimumWeightSolutionToLinearEquations" => { + "--matrix '[[1,2,3,1],[2,1,1,3]]' --rhs '5,4'" + } "ConjunctiveBooleanQuery" => { "--domain-size 6 --relations \"2:0,3|1,3|2,4;3:0,1,5|1,2,5\" --conjuncts-spec \"0:v0,c3;0:v1,c3;1:v0,v1,c5\"" } @@ -824,6 +875,7 @@ fn uses_edge_weights_flag(canonical: &str) -> bool { matches!( canonical, "BottleneckTravelingSalesman" + | "BoundedDiameterSpanningTree" | "KthBestSpanningTree" | "LongestCircuit" | "MaxCut" @@ -911,6 +963,10 @@ fn help_flag_hint( match (canonical, field_name) { ("BoundedComponentSpanningForest", "max_weight") => "integer", ("SequencingWithinIntervals", "release_times") => "comma-separated integers: 0,0,5", + ("DynamicStorageAllocation", "release_times") => "comma-separated arrival times: 0,0,1,2,3", + ("DynamicStorageAllocation", "deadlines") => "comma-separated departure times: 3,2,4,5,5", + ("DynamicStorageAllocation", "sizes") => "comma-separated item sizes: 2,3,1,3,2", + ("DynamicStorageAllocation", "capacity") => "memory size D: 6", ("DisjointConnectingPaths", "terminal_pairs") => "comma-separated pairs: 0-3,2-5", ("PrimeAttributeName", "dependencies") => { "semicolon-separated dependencies: \"0,1>2,3;2,3>0,1\"" @@ -947,6 +1003,11 @@ fn help_flag_hint( } ("ConsecutiveOnesSubmatrix", "matrix") => "semicolon-separated 0/1 rows: \"1,0;0,1\"", ("SparseMatrixCompression", "matrix") => "semicolon-separated 0/1 rows: \"1,0;0,1\"", + ("MinimumMatrixDomination", "matrix") => "semicolon-separated 0/1 rows: \"1,0;0,1\"", + ("MinimumWeightSolutionToLinearEquations", "matrix") => { + "JSON 2D integer array: '[[1,2,3],[4,5,6]]'" + } + ("MinimumWeightSolutionToLinearEquations", "rhs") => "comma-separated integers: \"5,4\"", ("FeasibleBasisExtension", "matrix") => "JSON 2D integer array: '[[1,0,1],[0,1,0]]'", ("FeasibleBasisExtension", "rhs") => "comma-separated integers: \"7,5,3\"", ("FeasibleBasisExtension", "required_columns") => "comma-separated column indices: \"0,1\"", @@ -1404,6 +1465,45 @@ pub fn create(args: &CreateArgs, out: &OutputConfig) -> Result<()> { ) } + // MaximumAchromaticNumber (graph only, no weights) + "MaximumAchromaticNumber" => { + let (graph, _) = parse_graph(args).map_err(|e| { + anyhow::anyhow!( + "{e}\n\nUsage: pred create MaximumAchromaticNumber --graph 0-1,1-2,2-3,3-4,4-5,5-0" + ) + })?; + ( + ser(problemreductions::models::graph::MaximumAchromaticNumber::new(graph))?, + variant_map(&[("graph", "SimpleGraph")]), + ) + } + + // MinimumCoveringByCliques (graph only, no weights) + "MinimumCoveringByCliques" => { + let (graph, _) = parse_graph(args).map_err(|e| { + anyhow::anyhow!( + "{e}\n\nUsage: pred create MinimumCoveringByCliques --graph 0-1,1-2,0-2,2-3" + ) + })?; + ( + ser(problemreductions::models::graph::MinimumCoveringByCliques::new(graph))?, + variant_map(&[("graph", "SimpleGraph")]), + ) + } + + // MinimumIntersectionGraphBasis (graph only, no weights) + "MinimumIntersectionGraphBasis" => { + let (graph, _) = parse_graph(args).map_err(|e| { + anyhow::anyhow!( + "{e}\n\nUsage: pred create MinimumIntersectionGraphBasis --graph 0-1,1-2" + ) + })?; + ( + ser(problemreductions::models::graph::MinimumIntersectionGraphBasis::new(graph))?, + variant_map(&[("graph", "SimpleGraph")]), + ) + } + // MinimumMaximalMatching (graph only, no weights) "MinimumMaximalMatching" => { let (graph, _) = parse_graph(args).map_err(|e| { @@ -1910,6 +2010,36 @@ pub fn create(args: &CreateArgs, out: &OutputConfig) -> Result<()> { ) } + // Bounded Diameter Spanning Tree (graph + edge weights + weight bound + diameter bound) + "BoundedDiameterSpanningTree" => { + reject_vertex_weights_for_edge_weight_problem(args, canonical, None)?; + let usage = "Usage: pred create BoundedDiameterSpanningTree --graph 0-1,0-2,0-3,1-2,1-4,2-3,3-4 --edge-weights 1,2,1,1,2,1,1 --weight-bound 5 --diameter-bound 3"; + let (graph, _) = parse_graph(args).map_err(|e| anyhow::anyhow!("{e}\n\n{usage}"))?; + let edge_weights = parse_edge_weights(args, graph.num_edges())?; + ensure_positive_i32_values(&edge_weights, "edge weights")?; + let weight_bound = args.weight_bound.ok_or_else(|| { + anyhow::anyhow!("BoundedDiameterSpanningTree requires --weight-bound\n\n{usage}") + })?; + ensure_positive_i32(weight_bound, "weight_bound")?; + let diameter_bound = args.diameter_bound.ok_or_else(|| { + anyhow::anyhow!("BoundedDiameterSpanningTree requires --diameter-bound\n\n{usage}") + })?; + if diameter_bound == 0 { + bail!("BoundedDiameterSpanningTree requires --diameter-bound >= 1\n\n{usage}"); + } + ( + ser( + problemreductions::models::graph::BoundedDiameterSpanningTree::new( + graph, + edge_weights, + weight_bound, + diameter_bound, + ), + )?, + resolved_variant.clone(), + ) + } + // KthBestSpanningTree (weighted graph + k + bound) "KthBestSpanningTree" => { reject_vertex_weights_for_edge_weight_problem(args, canonical, None)?; @@ -2125,6 +2255,49 @@ pub fn create(args: &CreateArgs, out: &OutputConfig) -> Result<()> { util::ser_ksat(num_vars, clauses, k)? } + "NonTautology" => { + let num_vars = args.num_vars.ok_or_else(|| { + anyhow::anyhow!( + "NonTautology requires --num-vars\n\n\ + Usage: pred create NonTautology --num-vars 3 --clauses \"1,2,3;-1,-2,-3\"" + ) + })?; + let clauses = parse_clauses(args)?; + let disjuncts: Vec> = clauses.into_iter().map(|c| c.literals).collect(); + ( + ser(NonTautology::new(num_vars, disjuncts))?, + resolved_variant.clone(), + ) + } + + "OneInThreeSatisfiability" => { + let num_vars = args.num_vars.ok_or_else(|| { + anyhow::anyhow!( + "OneInThreeSatisfiability requires --num-vars\n\n\ + Usage: pred create OneInThreeSatisfiability --num-vars 4 --clauses \"1,2,3;-1,3,4;2,-3,-4\"" + ) + })?; + let clauses = parse_clauses(args)?; + ( + ser(OneInThreeSatisfiability::new(num_vars, clauses))?, + resolved_variant.clone(), + ) + } + + "Planar3Satisfiability" => { + let num_vars = args.num_vars.ok_or_else(|| { + anyhow::anyhow!( + "Planar3Satisfiability requires --num-vars\n\n\ + Usage: pred create Planar3Satisfiability --num-vars 4 --clauses \"1,2,3;-1,2,4;1,-3,4;-2,3,-4\"" + ) + })?; + let clauses = parse_clauses(args)?; + ( + ser(Planar3Satisfiability::new(num_vars, clauses))?, + resolved_variant.clone(), + ) + } + // QBF "QuantifiedBooleanFormulas" => { let num_vars = args.num_vars.ok_or_else(|| { @@ -2483,6 +2656,28 @@ pub fn create(args: &CreateArgs, out: &OutputConfig) -> Result<()> { ) } + // SubsetProduct + "SubsetProduct" => { + let sizes_str = args.sizes.as_deref().ok_or_else(|| { + anyhow::anyhow!( + "SubsetProduct requires --sizes and --target\n\n\ + Usage: pred create SubsetProduct --sizes 2,3,5,7,6,10 --target 210" + ) + })?; + let target = args.target.as_deref().ok_or_else(|| { + anyhow::anyhow!( + "SubsetProduct requires --target\n\n\ + Usage: pred create SubsetProduct --sizes 2,3,5,7,6,10 --target 210" + ) + })?; + let sizes = util::parse_biguint_list(sizes_str)?; + let target = util::parse_decimal_biguint(target)?; + ( + ser(SubsetProduct::new(sizes, target))?, + resolved_variant.clone(), + ) + } + // IntegerExpressionMembership "IntegerExpressionMembership" => { let usage = "Usage: pred create IntegerExpressionMembership --expression '{\"Sum\":[{\"Atom\":1},{\"Atom\":2}]}' --target 3"; @@ -2511,6 +2706,183 @@ pub fn create(args: &CreateArgs, out: &OutputConfig) -> Result<()> { ) } + // Numerical3DimensionalMatching + "Numerical3DimensionalMatching" => { + let w_sizes_str = args.w_sizes.as_deref().ok_or_else(|| { + anyhow::anyhow!( + "Numerical3DimensionalMatching requires --w-sizes, --x-sizes, --y-sizes, and --bound\n\n\ + Usage: pred create Numerical3DimensionalMatching --w-sizes 4,5 --x-sizes 4,5 --y-sizes 5,7 --bound 15" + ) + })?; + let x_sizes_str = args.x_sizes.as_deref().ok_or_else(|| { + anyhow::anyhow!( + "Numerical3DimensionalMatching requires --x-sizes\n\n\ + Usage: pred create Numerical3DimensionalMatching --w-sizes 4,5 --x-sizes 4,5 --y-sizes 5,7 --bound 15" + ) + })?; + let y_sizes_str = args.y_sizes.as_deref().ok_or_else(|| { + anyhow::anyhow!( + "Numerical3DimensionalMatching requires --y-sizes\n\n\ + Usage: pred create Numerical3DimensionalMatching --w-sizes 4,5 --x-sizes 4,5 --y-sizes 5,7 --bound 15" + ) + })?; + let bound = args.bound.ok_or_else(|| { + anyhow::anyhow!( + "Numerical3DimensionalMatching requires --bound\n\n\ + Usage: pred create Numerical3DimensionalMatching --w-sizes 4,5 --x-sizes 4,5 --y-sizes 5,7 --bound 15" + ) + })?; + let bound = u64::try_from(bound).map_err(|_| { + anyhow::anyhow!( + "Numerical3DimensionalMatching requires a positive integer --bound\n\n\ + Usage: pred create Numerical3DimensionalMatching --w-sizes 4,5 --x-sizes 4,5 --y-sizes 5,7 --bound 15" + ) + })?; + let sizes_w: Vec = util::parse_comma_list(w_sizes_str)?; + let sizes_x: Vec = util::parse_comma_list(x_sizes_str)?; + let sizes_y: Vec = util::parse_comma_list(y_sizes_str)?; + ( + ser( + Numerical3DimensionalMatching::try_new(sizes_w, sizes_x, sizes_y, bound) + .map_err(anyhow::Error::msg)?, + )?, + resolved_variant.clone(), + ) + } + + // NonLivenessFreePetriNet + "NonLivenessFreePetriNet" => { + let usage = "Usage: pred create NonLivenessFreePetriNet --n 4 --m 3 --arcs \"0>0,1>1,2>2\" --output-arcs \"0>1,1>2,2>3\" --initial-marking 1,0,0,0"; + let num_places = args.n.ok_or_else(|| { + anyhow::anyhow!("NonLivenessFreePetriNet requires --n (num_places)\n\n{usage}") + })?; + let num_transitions = args.m.ok_or_else(|| { + anyhow::anyhow!("NonLivenessFreePetriNet requires --m (num_transitions)\n\n{usage}") + })?; + let arcs_str = args.arcs.as_deref().ok_or_else(|| { + anyhow::anyhow!( + "NonLivenessFreePetriNet requires --arcs (place>transition arcs)\n\n{usage}" + ) + })?; + let output_arcs_str = args.output_arcs.as_deref().ok_or_else(|| { + anyhow::anyhow!( + "NonLivenessFreePetriNet requires --output-arcs (transition>place arcs)\n\n{usage}" + ) + })?; + let marking_str = args.initial_marking.as_deref().ok_or_else(|| { + anyhow::anyhow!("NonLivenessFreePetriNet requires --initial-marking\n\n{usage}") + })?; + + let place_to_transition: Vec<(usize, usize)> = arcs_str + .split(',') + .filter(|s| !s.trim().is_empty()) + .map(|s| { + let parts: Vec<&str> = s.trim().split('>').collect(); + if parts.len() != 2 { + bail!("Invalid arc '{s}', expected 'place>transition'"); + } + let p: usize = parts[0] + .parse() + .with_context(|| format!("Invalid place index in arc '{s}'"))?; + let t: usize = parts[1] + .parse() + .with_context(|| format!("Invalid transition index in arc '{s}'"))?; + Ok((p, t)) + }) + .collect::>()?; + + let transition_to_place: Vec<(usize, usize)> = output_arcs_str + .split(',') + .filter(|s| !s.trim().is_empty()) + .map(|s| { + let parts: Vec<&str> = s.trim().split('>').collect(); + if parts.len() != 2 { + bail!("Invalid output arc '{s}', expected 'transition>place'"); + } + let t: usize = parts[0] + .parse() + .with_context(|| format!("Invalid transition index in output arc '{s}'"))?; + let p: usize = parts[1] + .parse() + .with_context(|| format!("Invalid place index in output arc '{s}'"))?; + Ok((t, p)) + }) + .collect::>()?; + + let initial_marking: Vec = marking_str + .split(',') + .map(|s| { + s.trim() + .parse::() + .with_context(|| format!("Invalid marking value: {s}")) + }) + .collect::>()?; + + ( + ser(NonLivenessFreePetriNet::try_new( + num_places, + num_transitions, + place_to_transition, + transition_to_place, + initial_marking, + ) + .map_err(anyhow::Error::msg)?)?, + resolved_variant.clone(), + ) + } + + // Betweenness + "Betweenness" => { + let n = args.n.ok_or_else(|| { + anyhow::anyhow!( + "Betweenness requires --n and --sets\n\n\ + Usage: pred create Betweenness --n 5 --sets \"0,1,2;2,3,4;0,2,4;1,3,4\"" + ) + })?; + let sets = parse_sets(args)?; + for (i, set) in sets.iter().enumerate() { + if set.len() != 3 { + bail!( + "Triple {} has {} elements, expected 3 (a,b,c)", + i, + set.len() + ); + } + } + let triples: Vec<(usize, usize, usize)> = + sets.iter().map(|s| (s[0], s[1], s[2])).collect(); + ( + ser(Betweenness::try_new(n, triples).map_err(anyhow::Error::msg)?)?, + resolved_variant.clone(), + ) + } + + // CyclicOrdering + "CyclicOrdering" => { + let n = args.n.ok_or_else(|| { + anyhow::anyhow!( + "CyclicOrdering requires --n and --sets\n\n\ + Usage: pred create CyclicOrdering --n 5 --sets \"0,1,2;2,3,0;1,3,4\"" + ) + })?; + let sets = parse_sets(args)?; + for (i, set) in sets.iter().enumerate() { + if set.len() != 3 { + bail!( + "Triple {} has {} elements, expected 3 (a,b,c)", + i, + set.len() + ); + } + } + let triples: Vec<(usize, usize, usize)> = + sets.iter().map(|s| (s[0], s[1], s[2])).collect(); + ( + ser(CyclicOrdering::try_new(n, triples).map_err(anyhow::Error::msg)?)?, + resolved_variant.clone(), + ) + } + // ThreePartition "ThreePartition" => { let sizes_str = args.sizes.as_deref().ok_or_else(|| { @@ -2538,6 +2910,41 @@ pub fn create(args: &CreateArgs, out: &OutputConfig) -> Result<()> { ) } + // DynamicStorageAllocation + "DynamicStorageAllocation" => { + let usage = "Usage: pred create DynamicStorageAllocation --release-times 0,0,1,2,3 --deadlines 3,2,4,5,5 --sizes 2,3,1,3,2 --capacity 6"; + let rt_str = args.release_times.as_deref().ok_or_else(|| { + anyhow::anyhow!("DynamicStorageAllocation requires --release-times\n\n{usage}") + })?; + let dl_str = args.deadlines.as_deref().ok_or_else(|| { + anyhow::anyhow!("DynamicStorageAllocation requires --deadlines\n\n{usage}") + })?; + let sizes_str = args.sizes.as_deref().ok_or_else(|| { + anyhow::anyhow!("DynamicStorageAllocation requires --sizes\n\n{usage}") + })?; + let cap_str = args.capacity.as_deref().ok_or_else(|| { + anyhow::anyhow!("DynamicStorageAllocation requires --capacity\n\n{usage}") + })?; + let release_times: Vec = util::parse_comma_list(rt_str)?; + let deadlines: Vec = util::parse_comma_list(dl_str)?; + let sizes: Vec = util::parse_comma_list(sizes_str)?; + let memory_size: usize = cap_str.parse()?; + if release_times.len() != deadlines.len() || release_times.len() != sizes.len() { + bail!("--release-times, --deadlines, and --sizes must have the same length\n\n{usage}"); + } + let items: Vec<(usize, usize, usize)> = release_times + .into_iter() + .zip(deadlines) + .zip(sizes) + .map(|((r, d), s)| (r, d, s)) + .collect(); + ( + ser(DynamicStorageAllocation::try_new(items, memory_size) + .map_err(anyhow::Error::msg)?)?, + resolved_variant.clone(), + ) + } + // KthLargestMTuple "KthLargestMTuple" => { let sets_str = args.sets.as_deref().ok_or_else(|| { @@ -2572,6 +2979,52 @@ pub fn create(args: &CreateArgs, out: &OutputConfig) -> Result<()> { ) } + // AlgebraicEquationsOverGF2 + "AlgebraicEquationsOverGF2" => { + let n = args.num_vars.ok_or_else(|| { + anyhow::anyhow!( + "AlgebraicEquationsOverGF2 requires --num-vars and --equations\n\n\ + Usage: pred create AlgebraicEquationsOverGF2 --num-vars 3 --equations \"0,1:2;1,2:0:;0:1:2:\"\n\n\ + Format: semicolons separate equations, colons separate monomials within an equation,\n\ + commas separate variable indices within a monomial, empty monomial = constant 1" + ) + })?; + let eq_str = args.equations.as_deref().ok_or_else(|| { + anyhow::anyhow!( + "AlgebraicEquationsOverGF2 requires --equations\n\n\ + Usage: pred create AlgebraicEquationsOverGF2 --num-vars 3 --equations \"0,1:2;1,2:0:;0:1:2:\"" + ) + })?; + // Parse equations: "0,1:2;1,2:0:;0:1:2:" + // ';' separates equations, ':' separates monomials, ',' separates variables + let equations: Vec>> = eq_str + .split(';') + .map(|eq_s| { + eq_s.split(':') + .map(|mono_s| { + let mono_s = mono_s.trim(); + if mono_s.is_empty() { + Ok(vec![]) // constant 1 + } else { + mono_s + .split(',') + .map(|v| { + v.trim().parse::().map_err(|e| { + anyhow::anyhow!("Invalid variable index '{v}': {e}") + }) + }) + .collect::>>() + } + }) + .collect::>>>() + }) + .collect::>>>>()?; + ( + ser(AlgebraicEquationsOverGF2::new(n, equations).map_err(anyhow::Error::msg)?)?, + resolved_variant.clone(), + ) + } + // QuadraticCongruences "QuadraticCongruences" => { let a = args.coeff_a.ok_or_else(|| { @@ -2902,6 +3355,49 @@ pub fn create(args: &CreateArgs, out: &OutputConfig) -> Result<()> { ) } + // ThreeDimensionalMatching + "ThreeDimensionalMatching" => { + let universe = args.universe.ok_or_else(|| { + anyhow::anyhow!( + "ThreeDimensionalMatching requires --universe and --sets\n\n\ + Usage: pred create 3DM --universe 3 --sets \"0,1,2;1,0,1;2,2,0\"" + ) + })?; + let sets = parse_sets(args)?; + // Validate each set has exactly 3 elements representing (w, x, y) coordinates + for (i, set) in sets.iter().enumerate() { + if set.len() != 3 { + bail!( + "Triple {} has {} elements, expected 3 (w,x,y)", + i, + set.len() + ); + } + for (coord_idx, &elem) in set.iter().enumerate() { + let coord_name = ["w", "x", "y"][coord_idx]; + if elem >= universe { + bail!( + "Triple {} has {}-coordinate {} which is outside 0..{}", + i, + coord_name, + elem, + universe + ); + } + } + } + let triples: Vec<(usize, usize, usize)> = + sets.into_iter().map(|s| (s[0], s[1], s[2])).collect(); + ( + ser( + problemreductions::models::set::ThreeDimensionalMatching::new( + universe, triples, + ), + )?, + resolved_variant.clone(), + ) + } + // SetBasis "SetBasis" => { let universe = args.universe.ok_or_else(|| { @@ -3127,6 +3623,48 @@ pub fn create(args: &CreateArgs, out: &OutputConfig) -> Result<()> { ) } + // MinimumMatrixDomination + "MinimumMatrixDomination" => { + let matrix = parse_bool_matrix(args)?; + ( + ser(MinimumMatrixDomination::new(matrix))?, + resolved_variant.clone(), + ) + } + + // MinimumWeightSolutionToLinearEquations + "MinimumWeightSolutionToLinearEquations" => { + let usage = "Usage: pred create MinimumWeightSolutionToLinearEquations --matrix '[[1,2,3,1],[2,1,1,3]]' --rhs '5,4'"; + let matrix_str = args.matrix.as_deref().ok_or_else(|| { + anyhow::anyhow!( + "MinimumWeightSolutionToLinearEquations requires --matrix (JSON 2D i64 array) and --rhs\n\n{usage}" + ) + })?; + let matrix: Vec> = serde_json::from_str(matrix_str).map_err(|err| { + anyhow::anyhow!( + "MinimumWeightSolutionToLinearEquations requires --matrix as a JSON 2D integer array (e.g., '[[1,2,3],[4,5,6]]')\n\n{usage}\n\nFailed to parse --matrix: {err}" + ) + })?; + let rhs_str = args.rhs.as_deref().ok_or_else(|| { + anyhow::anyhow!( + "MinimumWeightSolutionToLinearEquations requires --rhs (comma-separated integers)\n\n{usage}" + ) + })?; + let rhs: Vec = rhs_str + .split(',') + .map(|s| s.trim().parse::()) + .collect::, _>>() + .map_err(|err| { + anyhow::anyhow!( + "Failed to parse --rhs as comma-separated integers: {err}\n\n{usage}" + ) + })?; + ( + ser(MinimumWeightSolutionToLinearEquations::new(matrix, rhs))?, + resolved_variant.clone(), + ) + } + // FeasibleBasisExtension "FeasibleBasisExtension" => { let usage = "Usage: pred create FeasibleBasisExtension --matrix '[[1,0,1],[0,1,0]]' --rhs '7,5' --required-columns '0'"; @@ -4556,6 +5094,30 @@ pub fn create(args: &CreateArgs, out: &OutputConfig) -> Result<()> { ) } + // DegreeConstrainedSpanningTree + "DegreeConstrainedSpanningTree" => { + let usage = "Usage: pred create DegreeConstrainedSpanningTree --graph 0-1,0-2,0-3,1-2,1-4,2-3,3-4 --k 2"; + let (graph, _) = parse_graph(args).map_err(|e| anyhow::anyhow!("{e}\n\n{usage}"))?; + let max_degree = args.k.ok_or_else(|| { + anyhow::anyhow!( + "DegreeConstrainedSpanningTree requires --k (maximum vertex degree)\n\n{usage}" + ) + })?; + anyhow::ensure!( + max_degree >= 1, + "DegreeConstrainedSpanningTree requires --k >= 1, got {}", + max_degree + ); + ( + ser( + problemreductions::models::graph::DegreeConstrainedSpanningTree::new( + graph, max_degree, + ), + )?, + resolved_variant.clone(), + ) + } + // DirectedHamiltonianPath "DirectedHamiltonianPath" => { let arcs_str = args.arcs.as_deref().ok_or_else(|| { @@ -4571,6 +5133,18 @@ pub fn create(args: &CreateArgs, out: &OutputConfig) -> Result<()> { ) } + // Kernel + "Kernel" => { + let arcs_str = args.arcs.as_deref().ok_or_else(|| { + anyhow::anyhow!( + "Kernel requires --arcs\n\n\ + Usage: pred create Kernel --arcs \"0>1,1>2,2>0\" [--num-vertices N]" + ) + })?; + let (graph, _) = parse_directed_graph(arcs_str, args.num_vertices)?; + (ser(Kernel::new(graph))?, resolved_variant.clone()) + } + // AcyclicPartition "AcyclicPartition" => { let usage = "Usage: pred create AcyclicPartition/i32 --arcs \"0>1,0>2,1>3,1>4,2>4,2>5,3>5,4>5\" --weights 2,3,2,1,3,1 --arc-costs 1,1,1,1,1,1,1,1 --weight-bound 5 --cost-bound 5"; @@ -4666,6 +5240,30 @@ pub fn create(args: &CreateArgs, out: &OutputConfig) -> Result<()> { ) } + // MinimumGeometricConnectedDominatingSet + "MinimumGeometricConnectedDominatingSet" => { + let usage = "Usage: pred create MinimumGeometricConnectedDominatingSet --positions \"0,0;3,0;6,0\" --radius 3.5"; + let positions = parse_float_positions(args).map_err(|_| { + anyhow::anyhow!( + "MinimumGeometricConnectedDominatingSet requires --positions\n\n\ + {usage}" + ) + })?; + let radius = args.radius.ok_or_else(|| { + anyhow::anyhow!( + "MinimumGeometricConnectedDominatingSet requires --radius\n\n\ + {usage}" + ) + })?; + ( + ser( + MinimumGeometricConnectedDominatingSet::try_new(positions, radius) + .map_err(|e| anyhow::anyhow!(e))?, + )?, + resolved_variant.clone(), + ) + } + // MinimumDummyActivitiesPert "MinimumDummyActivitiesPert" => { let usage = "Usage: pred create MinimumDummyActivitiesPert --arcs \"0>2,0>3,1>3,1>4,2>5\" [--num-vertices N]"; @@ -4682,6 +5280,51 @@ pub fn create(args: &CreateArgs, out: &OutputConfig) -> Result<()> { ) } + // FeasibleRegisterAssignment + "FeasibleRegisterAssignment" => { + let usage = "Usage: pred create FeasibleRegisterAssignment --arcs \"0>1,0>2,1>3\" --assignment 0,1,0,0 --k 2 [--num-vertices N]"; + let arcs_str = args.arcs.as_deref().ok_or_else(|| { + anyhow::anyhow!( + "FeasibleRegisterAssignment requires --arcs, --assignment, and --k\n\n\ + {usage}" + ) + })?; + let k = args.k.ok_or_else(|| { + anyhow::anyhow!( + "FeasibleRegisterAssignment requires --k (number of registers)\n\n\ + {usage}" + ) + })?; + let assignment_str = args.assignment.as_deref().ok_or_else(|| { + anyhow::anyhow!( + "FeasibleRegisterAssignment requires --assignment\n\n\ + {usage}" + ) + })?; + let (graph, _) = parse_directed_graph(arcs_str, args.num_vertices)?; + let n = graph.num_vertices(); + let arcs = graph.arcs(); + let assignment: Vec = assignment_str + .split(',') + .map(|s| { + s.trim() + .parse::() + .with_context(|| format!("Invalid assignment value: {s}")) + }) + .collect::>()?; + if assignment.len() != n { + bail!( + "Assignment length {} does not match vertex count {}\n\n{usage}", + assignment.len(), + n + ); + } + ( + ser(FeasibleRegisterAssignment::new(n, arcs, k, assignment))?, + resolved_variant.clone(), + ) + } + // RegisterSufficiency "RegisterSufficiency" => { let usage = "Usage: pred create RegisterSufficiency --arcs \"2>0,2>1,3>1,4>2,4>3,5>0,6>4,6>5\" --bound 3 [--num-vertices N]"; @@ -4807,6 +5450,19 @@ pub fn create(args: &CreateArgs, out: &OutputConfig) -> Result<()> { ) } + // MonochromaticTriangle + "MonochromaticTriangle" => { + let (graph, _) = parse_graph(args).map_err(|e| { + anyhow::anyhow!( + "{e}\n\nUsage: pred create MonochromaticTriangle --graph 0-1,0-2,0-3,1-2,1-3,2-3" + ) + })?; + ( + ser(MonochromaticTriangle::new(graph))?, + resolved_variant.clone(), + ) + } + // PartitionIntoTriangles "PartitionIntoTriangles" => { let (graph, _) = parse_graph(args).map_err(|e| { @@ -4825,6 +5481,74 @@ pub fn create(args: &CreateArgs, out: &OutputConfig) -> Result<()> { ) } + // PartitionIntoCliques + "PartitionIntoCliques" => { + let (graph, _) = parse_graph(args).map_err(|e| { + anyhow::anyhow!( + "{e}\n\nUsage: pred create PartitionIntoCliques --graph 0-1,0-2,1-2,3-4,3-5,4-5 --k 3" + ) + })?; + let num_cliques = args.k.ok_or_else(|| { + anyhow::anyhow!( + "PartitionIntoCliques requires --k (maximum number of clique groups)\n\n\ + Usage: pred create PartitionIntoCliques --graph 0-1,0-2,1-2,3-4,3-5,4-5 --k 3" + ) + })?; + anyhow::ensure!( + num_cliques >= 1, + "PartitionIntoCliques requires --k >= 1, got {}", + num_cliques + ); + anyhow::ensure!( + num_cliques <= graph.num_vertices(), + "PartitionIntoCliques requires --k <= num_vertices ({}), got {}", + graph.num_vertices(), + num_cliques + ); + ( + ser(problemreductions::models::graph::PartitionIntoCliques::new( + graph, + num_cliques, + ))?, + resolved_variant.clone(), + ) + } + + // PartitionIntoPerfectMatchings + "PartitionIntoPerfectMatchings" => { + let (graph, _) = parse_graph(args).map_err(|e| { + anyhow::anyhow!( + "{e}\n\nUsage: pred create PartitionIntoPerfectMatchings --graph 0-1,2-3,0-2,1-3 --k 2" + ) + })?; + let num_matchings = args.k.ok_or_else(|| { + anyhow::anyhow!( + "PartitionIntoPerfectMatchings requires --k (maximum number of matching groups)\n\n\ + Usage: pred create PartitionIntoPerfectMatchings --graph 0-1,2-3,0-2,1-3 --k 2" + ) + })?; + anyhow::ensure!( + num_matchings >= 1, + "PartitionIntoPerfectMatchings requires --k >= 1, got {}", + num_matchings + ); + anyhow::ensure!( + num_matchings <= graph.num_vertices(), + "PartitionIntoPerfectMatchings requires --k <= num_vertices ({}), got {}", + graph.num_vertices(), + num_matchings + ); + ( + ser( + problemreductions::models::graph::PartitionIntoPerfectMatchings::new( + graph, + num_matchings, + ), + )?, + resolved_variant.clone(), + ) + } + // PartitionIntoForests "PartitionIntoForests" => { let (graph, _) = parse_graph(args).map_err(|e| { @@ -6856,6 +7580,48 @@ fn create_random( ) } + // MaximumAchromaticNumber (graph only, no weights) + "MaximumAchromaticNumber" => { + let edge_prob = args.edge_prob.unwrap_or(0.5); + if !(0.0..=1.0).contains(&edge_prob) { + bail!("--edge-prob must be between 0.0 and 1.0"); + } + let graph = util::create_random_graph(num_vertices, edge_prob, args.seed); + let variant = variant_map(&[("graph", "SimpleGraph")]); + ( + ser(problemreductions::models::graph::MaximumAchromaticNumber::new(graph))?, + variant, + ) + } + + // MinimumCoveringByCliques (graph only, no weights) + "MinimumCoveringByCliques" => { + let edge_prob = args.edge_prob.unwrap_or(0.5); + if !(0.0..=1.0).contains(&edge_prob) { + bail!("--edge-prob must be between 0.0 and 1.0"); + } + let graph = util::create_random_graph(num_vertices, edge_prob, args.seed); + let variant = variant_map(&[("graph", "SimpleGraph")]); + ( + ser(problemreductions::models::graph::MinimumCoveringByCliques::new(graph))?, + variant, + ) + } + + // MinimumIntersectionGraphBasis (graph only, no weights) + "MinimumIntersectionGraphBasis" => { + let edge_prob = args.edge_prob.unwrap_or(0.5); + if !(0.0..=1.0).contains(&edge_prob) { + bail!("--edge-prob must be between 0.0 and 1.0"); + } + let graph = util::create_random_graph(num_vertices, edge_prob, args.seed); + let variant = variant_map(&[("graph", "SimpleGraph")]); + ( + ser(problemreductions::models::graph::MinimumIntersectionGraphBasis::new(graph))?, + variant, + ) + } + // MinimumMaximalMatching (graph only, no weights) "MinimumMaximalMatching" => { let edge_prob = args.edge_prob.unwrap_or(0.5); @@ -8346,6 +9112,7 @@ mod tests { latency_bound: None, length_bound: None, weight_bound: None, + diameter_bound: None, cost_bound: None, delay_budget: None, pattern: None, @@ -8411,6 +9178,13 @@ mod tests { required_columns: None, compilers: None, setup_times: None, + w_sizes: None, + x_sizes: None, + y_sizes: None, + equations: None, + assignment: None, + initial_marking: None, + output_arcs: None, } } diff --git a/src/lib.rs b/src/lib.rs index d8c41f14..42ec62d0 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -46,8 +46,8 @@ pub mod prelude { ConsecutiveOnesMatrixAugmentation, QuadraticAssignment, SparseMatrixCompression, BMF, QUBO, }; pub use crate::models::formula::{ - CNFClause, CircuitSAT, KSatisfiability, NAESatisfiability, QuantifiedBooleanFormulas, - Satisfiability, + CNFClause, CircuitSAT, KSatisfiability, NAESatisfiability, NonTautology, + OneInThreeSatisfiability, Planar3Satisfiability, QuantifiedBooleanFormulas, Satisfiability, }; pub use crate::models::graph::{ AcyclicPartition, BalancedCompleteBipartiteSubgraph, BicliqueCover, @@ -63,7 +63,8 @@ pub mod prelude { KColoring, LongestCircuit, MaxCut, MaximalIS, MaximumClique, MaximumIndependentSet, MaximumMatching, MinMaxMulticenter, MinimumCutIntoBoundedSets, MinimumDominatingSet, MinimumDummyActivitiesPert, MinimumFeedbackArcSet, MinimumFeedbackVertexSet, - MinimumMultiwayCut, MinimumSumMulticenter, MinimumVertexCover, MultipleChoiceBranching, + MinimumGeometricConnectedDominatingSet, MinimumMultiwayCut, MinimumSumMulticenter, + MinimumVertexCover, MonochromaticTriangle, MultipleChoiceBranching, MultipleCopyFileAllocation, OptimalLinearArrangement, PartialFeedbackEdgeSet, PartitionIntoPathsOfLength2, PartitionIntoTriangles, PathConstrainedNetworkFlow, RootedTreeArrangement, RuralPostman, ShortestWeightConstrainedPath, SteinerTreeInGraphs, diff --git a/src/models/algebraic/algebraic_equations_over_gf2.rs b/src/models/algebraic/algebraic_equations_over_gf2.rs new file mode 100644 index 00000000..be8d8c33 --- /dev/null +++ b/src/models/algebraic/algebraic_equations_over_gf2.rs @@ -0,0 +1,225 @@ +//! Algebraic Equations over GF(2) problem implementation. +//! +//! Given m multilinear polynomials over GF(2) in n variables, determine whether +//! there exists an assignment of the variables making all polynomials evaluate +//! to 0 (mod 2). + +use crate::registry::{FieldInfo, ProblemSchemaEntry, ProblemSizeFieldEntry}; +use crate::traits::Problem; +use crate::types::Or; +use serde::de::Error as _; +use serde::{Deserialize, Deserializer, Serialize}; + +inventory::submit! { + ProblemSchemaEntry { + name: "AlgebraicEquationsOverGF2", + display_name: "Algebraic Equations over GF(2)", + aliases: &[], + dimensions: &[], + module_path: module_path!(), + description: "Find assignment satisfying multilinear polynomial equations over GF(2)", + fields: &[ + FieldInfo { name: "num_variables", type_name: "usize", description: "Number of Boolean variables" }, + FieldInfo { name: "equations", type_name: "Vec>>", description: "Equations: list of polynomials, each a list of monomials, each a sorted list of variable indices" }, + ], + } +} + +inventory::submit! { + ProblemSizeFieldEntry { + name: "AlgebraicEquationsOverGF2", + fields: &["num_variables", "num_equations"], + } +} + +/// Algebraic Equations over GF(2). +/// +/// Given m multilinear polynomials over GF(2) in n variables, determine whether +/// there exists an assignment of the variables making all polynomials evaluate +/// to 0 (mod 2). +/// +/// Each equation is a list of monomials. Each monomial is a sorted list of +/// variable indices (0-indexed). An empty monomial represents the constant 1. +/// A polynomial evaluates to 0 when the XOR (sum mod 2) of all its monomial +/// values equals 0. +/// +/// # Example +/// +/// ``` +/// use problemreductions::models::algebraic::AlgebraicEquationsOverGF2; +/// use problemreductions::{Problem, Solver, BruteForce}; +/// +/// // Two equations in 3 variables: +/// // x0*x1 + x2 = 0 (mod 2) +/// // x0 + 1 = 0 (mod 2) +/// let problem = AlgebraicEquationsOverGF2::new( +/// 3, +/// vec![ +/// vec![vec![0, 1], vec![2]], // x0*x1 XOR x2 +/// vec![vec![0], vec![]], // x0 XOR 1 +/// ], +/// ).unwrap(); +/// +/// let solver = BruteForce::new(); +/// let witness = solver.find_witness(&problem); +/// assert!(witness.is_some()); +/// ``` +#[derive(Debug, Clone, Serialize)] +pub struct AlgebraicEquationsOverGF2 { + /// Number of variables. + num_variables: usize, + /// Equations: each equation is a list of monomials; + /// each monomial is a sorted list of variable indices. + equations: Vec>>, +} + +impl AlgebraicEquationsOverGF2 { + fn validate(num_variables: usize, equations: &[Vec>]) -> Result<(), String> { + for (eq_idx, equation) in equations.iter().enumerate() { + for (mono_idx, monomial) in equation.iter().enumerate() { + // Check variable indices are in range + for &var in monomial { + if var >= num_variables { + return Err(format!( + "Variable index {var} in equation {eq_idx}, monomial {mono_idx} \ + is out of range (num_variables = {num_variables})" + )); + } + } + // Check monomial is sorted and has no duplicates + for w in monomial.windows(2) { + if w[0] >= w[1] { + return Err(format!( + "Monomial {mono_idx} in equation {eq_idx} is not strictly sorted: \ + found {} >= {}", + w[0], w[1] + )); + } + } + } + } + Ok(()) + } + + /// Create a new `AlgebraicEquationsOverGF2` instance. + /// + /// Returns an error if any variable index is out of range or any monomial + /// is not strictly sorted. + pub fn new(num_variables: usize, equations: Vec>>) -> Result { + Self::validate(num_variables, &equations)?; + Ok(Self { + num_variables, + equations, + }) + } + + /// Get the number of variables. + pub fn num_variables(&self) -> usize { + self.num_variables + } + + /// Get the number of equations. + pub fn num_equations(&self) -> usize { + self.equations.len() + } + + /// Get the equations. + pub fn equations(&self) -> &[Vec>] { + &self.equations + } + + /// Evaluate a single monomial given a binary assignment. + /// + /// An empty monomial is the constant 1. + /// A non-empty monomial is the product (AND) of the indicated variables. + fn evaluate_monomial(monomial: &[usize], assignment: &[usize]) -> usize { + if monomial.is_empty() { + return 1; + } + for &var in monomial { + if assignment[var] == 0 { + return 0; + } + } + 1 + } + + /// Evaluate a single equation (polynomial) given a binary assignment. + /// + /// Returns true if the polynomial evaluates to 0 (mod 2). + fn evaluate_equation(equation: &[Vec], assignment: &[usize]) -> bool { + let sum: usize = equation + .iter() + .map(|mono| Self::evaluate_monomial(mono, assignment)) + .sum(); + sum.is_multiple_of(2) + } +} + +#[derive(Deserialize)] +struct AlgebraicEquationsOverGF2Data { + num_variables: usize, + equations: Vec>>, +} + +impl<'de> Deserialize<'de> for AlgebraicEquationsOverGF2 { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let data = AlgebraicEquationsOverGF2Data::deserialize(deserializer)?; + Self::new(data.num_variables, data.equations).map_err(D::Error::custom) + } +} + +impl Problem for AlgebraicEquationsOverGF2 { + const NAME: &'static str = "AlgebraicEquationsOverGF2"; + type Value = Or; + + fn variant() -> Vec<(&'static str, &'static str)> { + crate::variant_params![] + } + + fn dims(&self) -> Vec { + vec![2; self.num_variables] + } + + fn evaluate(&self, config: &[usize]) -> Or { + Or(self + .equations + .iter() + .all(|eq| Self::evaluate_equation(eq, config))) + } +} + +crate::declare_variants! { + default AlgebraicEquationsOverGF2 => "2^(0.6943 * num_variables)", +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_model_example_specs() -> Vec { + vec![crate::example_db::specs::ModelExampleSpec { + id: "algebraic_equations_over_gf2", + instance: Box::new( + AlgebraicEquationsOverGF2::new( + 3, + vec![ + // x0*x1 + x2 = 0 + vec![vec![0, 1], vec![2]], + // x1*x2 + x0 + 1 = 0 + vec![vec![1, 2], vec![0], vec![]], + // x0 + x1 + x2 + 1 = 0 + vec![vec![0], vec![1], vec![2], vec![]], + ], + ) + .unwrap(), + ), + // config [1,0,0]: eq1: 0*0+0=0 ✓, eq2: 0*0+1+1=0 ✓, eq3: 1+0+0+1=0 ✓ + optimal_config: vec![1, 0, 0], + optimal_value: serde_json::json!(true), + }] +} + +#[cfg(test)] +#[path = "../../unit_tests/models/algebraic/algebraic_equations_over_gf2.rs"] +mod tests; diff --git a/src/models/algebraic/minimum_matrix_domination.rs b/src/models/algebraic/minimum_matrix_domination.rs new file mode 100644 index 00000000..49fc9a2e --- /dev/null +++ b/src/models/algebraic/minimum_matrix_domination.rs @@ -0,0 +1,192 @@ +//! Minimum Matrix Domination problem implementation. +//! +//! Given an n×n binary matrix M, find a minimum subset C of 1-entries such that +//! every 1-entry not in C shares a row or column with some entry in C. + +use crate::registry::{FieldInfo, ProblemSchemaEntry}; +use crate::traits::Problem; +use crate::types::Min; +use serde::{Deserialize, Serialize}; + +inventory::submit! { + ProblemSchemaEntry { + name: "MinimumMatrixDomination", + display_name: "Minimum Matrix Domination", + aliases: &[], + dimensions: &[], + module_path: module_path!(), + description: "Find minimum subset of 1-entries in a binary matrix that dominates all other 1-entries by shared row or column", + fields: &[ + FieldInfo { name: "matrix", type_name: "Vec>", description: "n×n binary matrix M" }, + ], + } +} + +/// Minimum Matrix Domination. +/// +/// Given an n×n binary matrix M, find a minimum-cardinality subset C of +/// 1-entries such that every 1-entry not in C shares a row or column with +/// some entry in C. +/// +/// # Representation +/// +/// Each 1-entry in the matrix is a binary variable: `x_k = 1` if the k-th +/// 1-entry is selected into C. The 1-entries are enumerated in row-major +/// order. +/// +/// # Example +/// +/// ``` +/// use problemreductions::models::algebraic::MinimumMatrixDomination; +/// use problemreductions::{Problem, Solver, BruteForce}; +/// +/// // 3×3 identity matrix: 3 ones on the diagonal, no shared rows/cols +/// let matrix = vec![ +/// vec![true, false, false], +/// vec![false, true, false], +/// vec![false, false, true], +/// ]; +/// let problem = MinimumMatrixDomination::new(matrix); +/// let solver = BruteForce::new(); +/// let witness = solver.find_witness(&problem); +/// // All 3 diagonal entries must be selected (no domination possible) +/// assert_eq!(witness, Some(vec![1, 1, 1])); +/// ``` +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct MinimumMatrixDomination { + /// The binary matrix. + matrix: Vec>, + /// Positions of 1-entries in row-major order: (row, col). + ones: Vec<(usize, usize)>, +} + +impl MinimumMatrixDomination { + /// Create a new MinimumMatrixDomination instance. + /// + /// # Panics + /// + /// Panics if the matrix rows have inconsistent lengths. + pub fn new(matrix: Vec>) -> Self { + let num_cols = matrix.first().map_or(0, Vec::len); + for row in &matrix { + assert_eq!(row.len(), num_cols, "All rows must have the same length"); + } + let ones: Vec<(usize, usize)> = matrix + .iter() + .enumerate() + .flat_map(|(i, row)| { + row.iter() + .enumerate() + .filter(|(_, &v)| v) + .map(move |(j, _)| (i, j)) + }) + .collect(); + Self { matrix, ones } + } + + /// Returns a reference to the binary matrix. + pub fn matrix(&self) -> &[Vec] { + &self.matrix + } + + /// Returns the positions of 1-entries in row-major order. + pub fn ones(&self) -> &[(usize, usize)] { + &self.ones + } + + /// Returns the number of rows in the matrix. + pub fn num_rows(&self) -> usize { + self.matrix.len() + } + + /// Returns the number of columns in the matrix. + pub fn num_cols(&self) -> usize { + self.matrix.first().map_or(0, Vec::len) + } + + /// Returns the number of 1-entries in the matrix. + pub fn num_ones(&self) -> usize { + self.ones.len() + } +} + +impl Problem for MinimumMatrixDomination { + const NAME: &'static str = "MinimumMatrixDomination"; + type Value = Min; + + fn variant() -> Vec<(&'static str, &'static str)> { + crate::variant_params![] + } + + fn dims(&self) -> Vec { + vec![2; self.num_ones()] + } + + fn evaluate(&self, config: &[usize]) -> Min { + if config.len() != self.num_ones() { + return Min(None); + } + if config.iter().any(|&v| v >= 2) { + return Min(None); + } + + // Collect the set of selected 1-entry indices + let selected: Vec = config + .iter() + .enumerate() + .filter(|(_, &v)| v == 1) + .map(|(i, _)| i) + .collect(); + + // Build sets of rows and columns covered by selected entries + let mut covered_rows = std::collections::HashSet::new(); + let mut covered_cols = std::collections::HashSet::new(); + for &idx in &selected { + let (r, c) = self.ones[idx]; + covered_rows.insert(r); + covered_cols.insert(c); + } + + // Check domination: every unselected 1-entry must share a row or + // column with some selected entry + for (k, &(r, c)) in self.ones.iter().enumerate() { + if config[k] == 1 { + continue; // selected entries don't need domination + } + if !covered_rows.contains(&r) && !covered_cols.contains(&c) { + return Min(None); // not dominated + } + } + + Min(Some(selected.len())) + } +} + +crate::declare_variants! { + default MinimumMatrixDomination => "2^num_ones", +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_model_example_specs() -> Vec { + // P6 adjacency matrix (6×6, 10 ones) + // 1-entries: (0,1),(1,0),(1,2),(2,1),(2,3),(3,2),(3,4),(4,3),(4,5),(5,4) + // Optimal: select indices 0,1,7,6 -> C = {(0,1),(1,0),(4,3),(3,4)}, value = 4 + let matrix = vec![ + vec![false, true, false, false, false, false], + vec![true, false, true, false, false, false], + vec![false, true, false, true, false, false], + vec![false, false, true, false, true, false], + vec![false, false, false, true, false, true], + vec![false, false, false, false, true, false], + ]; + vec![crate::example_db::specs::ModelExampleSpec { + id: "minimum_matrix_domination", + instance: Box::new(MinimumMatrixDomination::new(matrix)), + optimal_config: vec![1, 1, 0, 0, 0, 0, 1, 1, 0, 0], + optimal_value: serde_json::json!(4), + }] +} + +#[cfg(test)] +#[path = "../../unit_tests/models/algebraic/minimum_matrix_domination.rs"] +mod tests; diff --git a/src/models/algebraic/minimum_weight_solution_to_linear_equations.rs b/src/models/algebraic/minimum_weight_solution_to_linear_equations.rs new file mode 100644 index 00000000..138425fb --- /dev/null +++ b/src/models/algebraic/minimum_weight_solution_to_linear_equations.rs @@ -0,0 +1,227 @@ +//! Minimum Weight Solution to Linear Equations problem implementation. +//! +//! Given an n×m integer matrix A and integer vector b, find a rational vector y +//! with Ay = b that minimizes the number of non-zero entries (Hamming weight). + +use crate::registry::{FieldInfo, ProblemSchemaEntry}; +use crate::traits::Problem; +use crate::types::Min; +use serde::{Deserialize, Serialize}; + +inventory::submit! { + ProblemSchemaEntry { + name: "MinimumWeightSolutionToLinearEquations", + display_name: "Minimum Weight Solution to Linear Equations", + aliases: &[], + dimensions: &[], + module_path: module_path!(), + description: "Find a rational solution to Ay=b minimizing the number of non-zero entries", + fields: &[ + FieldInfo { name: "matrix", type_name: "Vec>", description: "n×m integer matrix A" }, + FieldInfo { name: "rhs", type_name: "Vec", description: "right-hand side vector b of length n" }, + ], + } +} + +/// Minimum Weight Solution to Linear Equations. +/// +/// Given an n×m integer matrix A and an integer vector b, find a rational +/// vector y with Ay = b that minimizes ||y||_0 (the number of non-zero +/// entries, i.e., the Hamming weight of y). +/// +/// # Representation +/// +/// Each of the m columns is a binary variable: `x_j = 1` means column j is +/// selected (i.e., y_j may be non-zero). The evaluator checks whether the +/// restricted system (using only selected columns) is consistent over the +/// rationals, and returns the count of selected columns if so. +/// +/// # Example +/// +/// ``` +/// use problemreductions::models::algebraic::MinimumWeightSolutionToLinearEquations; +/// use problemreductions::{Problem, Solver, BruteForce}; +/// +/// let matrix = vec![ +/// vec![1, 2, 3, 1], +/// vec![2, 1, 1, 3], +/// ]; +/// let rhs = vec![5, 4]; +/// let problem = MinimumWeightSolutionToLinearEquations::new(matrix, rhs); +/// let solver = BruteForce::new(); +/// let witness = solver.find_witness(&problem); +/// assert!(witness.is_some()); +/// ``` +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct MinimumWeightSolutionToLinearEquations { + /// The n×m integer matrix A. + matrix: Vec>, + /// The right-hand side vector b of length n. + rhs: Vec, +} + +impl MinimumWeightSolutionToLinearEquations { + /// Create a new MinimumWeightSolutionToLinearEquations instance. + /// + /// # Panics + /// + /// Panics if the matrix is empty, rows have inconsistent lengths, + /// rhs length does not match the number of rows, or there are no columns. + pub fn new(matrix: Vec>, rhs: Vec) -> Self { + assert!(!matrix.is_empty(), "Matrix must have at least one row"); + let num_cols = matrix[0].len(); + assert!(num_cols > 0, "Matrix must have at least one column"); + for row in &matrix { + assert_eq!(row.len(), num_cols, "All rows must have the same length"); + } + assert_eq!( + rhs.len(), + matrix.len(), + "RHS length must equal number of rows" + ); + Self { matrix, rhs } + } + + /// Returns a reference to the matrix A. + pub fn matrix(&self) -> &[Vec] { + &self.matrix + } + + /// Returns a reference to the right-hand side vector b. + pub fn rhs(&self) -> &[i64] { + &self.rhs + } + + /// Returns the number of equations (rows of A). + pub fn num_equations(&self) -> usize { + self.matrix.len() + } + + /// Returns the number of variables (columns of A). + pub fn num_variables(&self) -> usize { + self.matrix[0].len() + } + + /// Check whether the system restricted to the given column indices is + /// consistent over the rationals. Uses integer Gaussian elimination on + /// the augmented matrix [A'|b] with i128 arithmetic. + fn is_consistent(&self, columns: &[usize]) -> bool { + let n = self.num_equations(); + let k = columns.len(); + + // Build augmented matrix [A'|b] as i128 to avoid overflow. + // Each row has k coefficient columns + 1 rhs column. + let mut aug: Vec> = (0..n) + .map(|i| { + let mut row = Vec::with_capacity(k + 1); + for &j in columns { + row.push(self.matrix[i][j] as i128); + } + row.push(self.rhs[i] as i128); + row + }) + .collect(); + + let mut pivot_row = 0; + for col in 0..k { + // Find a non-zero entry in column `col` at or below `pivot_row`. + let Some(swap_row) = (pivot_row..n).find(|&r| aug[r][col] != 0) else { + continue; + }; + aug.swap(pivot_row, swap_row); + + let pivot_val = aug[pivot_row][col]; + // Eliminate all other rows. + for r in 0..n { + if r == pivot_row { + continue; + } + let factor = aug[r][col]; + if factor == 0 { + continue; + } + // row[r] = pivot_val * row[r] - factor * row[pivot_row] + for c in 0..k + 1 { + aug[r][c] = pivot_val * aug[r][c] - factor * aug[pivot_row][c]; + } + } + pivot_row += 1; + } + + // Check for inconsistency: any row with all-zero coefficients but + // non-zero rhs means the system is inconsistent. + for row in &aug[pivot_row..n] { + if row[k] != 0 { + return false; + } + } + true + } +} + +impl Problem for MinimumWeightSolutionToLinearEquations { + const NAME: &'static str = "MinimumWeightSolutionToLinearEquations"; + type Value = Min; + + fn variant() -> Vec<(&'static str, &'static str)> { + crate::variant_params![] + } + + fn dims(&self) -> Vec { + vec![2; self.num_variables()] + } + + fn evaluate(&self, config: &[usize]) -> Min { + if config.len() != self.num_variables() { + return Min(None); + } + if config.iter().any(|&v| v >= 2) { + return Min(None); + } + + let columns: Vec = config + .iter() + .enumerate() + .filter(|(_, &v)| v == 1) + .map(|(j, _)| j) + .collect(); + + if columns.is_empty() { + // No columns selected — consistent iff b = 0. + if self.rhs.iter().all(|&v| v == 0) { + return Min(Some(0)); + } else { + return Min(None); + } + } + + if self.is_consistent(&columns) { + Min(Some(columns.len())) + } else { + Min(None) + } + } +} + +crate::declare_variants! { + default MinimumWeightSolutionToLinearEquations => "2^num_variables", +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_model_example_specs() -> Vec { + // A = [[1,2,3,1],[2,1,1,3]], b = [5,4], m=4, n=2 + // Config [1,1,0,0]: select columns 0,1. Submatrix [[1,2],[2,1]]. + // Solve [1,2;2,1]y=[5,4] → y=(1,2). Consistent. Min(2). + let matrix = vec![vec![1, 2, 3, 1], vec![2, 1, 1, 3]]; + let rhs = vec![5, 4]; + vec![crate::example_db::specs::ModelExampleSpec { + id: "minimum_weight_solution_to_linear_equations", + instance: Box::new(MinimumWeightSolutionToLinearEquations::new(matrix, rhs)), + optimal_config: vec![1, 1, 0, 0], + optimal_value: serde_json::json!(2), + }] +} + +#[cfg(test)] +#[path = "../../unit_tests/models/algebraic/minimum_weight_solution_to_linear_equations.rs"] +mod tests; diff --git a/src/models/algebraic/mod.rs b/src/models/algebraic/mod.rs index d95acf7c..e6fc86f6 100644 --- a/src/models/algebraic/mod.rs +++ b/src/models/algebraic/mod.rs @@ -1,6 +1,7 @@ //! Algebraic problems. //! //! Problems whose input is a matrix, linear system, or lattice: +//! - [`AlgebraicEquationsOverGF2`]: Multilinear polynomial equations over GF(2) //! - [`QUBO`]: Quadratic Unconstrained Binary Optimization //! - [`ILP`]: Integer Linear Programming //! - [`ClosestVectorProblem`]: Closest Vector Problem (minimize lattice distance) @@ -12,8 +13,11 @@ //! - [`QuadraticCongruences`]: Decide x² ≡ a (mod b) for x in {1, ..., c-1} //! - [`QuadraticDiophantineEquations`]: Decide ax² + by = c in positive integers //! - [`SimultaneousIncongruences`]: Decide whether x ≢ aᵢ (mod bᵢ) for all i simultaneously +//! - [`MinimumMatrixDomination`]: Minimum Matrix Domination (minimum dominating set of 1-entries) +//! - [`MinimumWeightSolutionToLinearEquations`]: Minimum Weight Solution to Linear Equations (minimize Hamming weight of Ay=b solution) //! - [`SparseMatrixCompression`]: Sparse Matrix Compression by row overlay +pub(crate) mod algebraic_equations_over_gf2; pub(crate) mod bmf; pub(crate) mod closest_vector_problem; pub(crate) mod consecutive_block_minimization; @@ -22,6 +26,8 @@ pub(crate) mod consecutive_ones_submatrix; pub(crate) mod equilibrium_point; pub(crate) mod feasible_basis_extension; pub(crate) mod ilp; +pub(crate) mod minimum_matrix_domination; +pub(crate) mod minimum_weight_solution_to_linear_equations; pub(crate) mod quadratic_assignment; pub(crate) mod quadratic_congruences; pub(crate) mod quadratic_diophantine_equations; @@ -29,6 +35,7 @@ pub(crate) mod qubo; pub(crate) mod simultaneous_incongruences; pub(crate) mod sparse_matrix_compression; +pub use algebraic_equations_over_gf2::AlgebraicEquationsOverGF2; pub use bmf::BMF; pub use closest_vector_problem::{ClosestVectorProblem, VarBounds}; pub use consecutive_block_minimization::ConsecutiveBlockMinimization; @@ -37,6 +44,8 @@ pub use consecutive_ones_submatrix::ConsecutiveOnesSubmatrix; pub use equilibrium_point::EquilibriumPoint; pub use feasible_basis_extension::FeasibleBasisExtension; pub use ilp::{Comparison, LinearConstraint, ObjectiveSense, VariableDomain, ILP}; +pub use minimum_matrix_domination::MinimumMatrixDomination; +pub use minimum_weight_solution_to_linear_equations::MinimumWeightSolutionToLinearEquations; pub use quadratic_assignment::QuadraticAssignment; pub use quadratic_congruences::QuadraticCongruences; pub use quadratic_diophantine_equations::QuadraticDiophantineEquations; @@ -47,6 +56,7 @@ pub use sparse_matrix_compression::SparseMatrixCompression; #[cfg(feature = "example-db")] pub(crate) fn canonical_model_example_specs() -> Vec { let mut specs = Vec::new(); + specs.extend(algebraic_equations_over_gf2::canonical_model_example_specs()); specs.extend(qubo::canonical_model_example_specs()); specs.extend(ilp::canonical_model_example_specs()); specs.extend(closest_vector_problem::canonical_model_example_specs()); @@ -55,6 +65,8 @@ pub(crate) fn canonical_model_example_specs() -> Vec Vec>", description: "Disjuncts (each a conjunction of literals) in disjunctive normal form" }, + ], + } +} + +/// Non-Tautology problem. +/// +/// Given a Boolean formula in DNF (disjunctive normal form) with disjuncts +/// D_1, ..., D_m, find a truth assignment that makes ALL disjuncts false +/// (i.e., the formula is not a tautology). +/// +/// A disjunct is a conjunction (AND) of literals. The DNF formula is the +/// disjunction (OR) of all disjuncts. The formula is false when every +/// disjunct is false, which happens when each disjunct has at least one +/// false literal. +/// +/// # Example +/// +/// ``` +/// use problemreductions::models::formula::NonTautology; +/// use problemreductions::{Problem, Solver, BruteForce}; +/// +/// // (x1 AND x2 AND x3) OR (NOT x1 AND NOT x2 AND NOT x3) +/// let problem = NonTautology::new( +/// 3, +/// vec![vec![1, 2, 3], vec![-1, -2, -3]], +/// ); +/// +/// let solver = BruteForce::new(); +/// let solution = solver.find_witness(&problem); +/// assert!(solution.is_some()); +/// ``` +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct NonTautology { + /// Number of variables. + num_vars: usize, + /// Disjuncts in DNF. Each disjunct is a conjunction of literals + /// represented as signed integers (positive = variable, negative = negation). + disjuncts: Vec>, +} + +impl NonTautology { + /// Create a new Non-Tautology problem. + /// + /// # Panics + /// Panics if any literal references a variable outside the range [1, num_vars]. + pub fn new(num_vars: usize, disjuncts: Vec>) -> Self { + for (i, disjunct) in disjuncts.iter().enumerate() { + for &lit in disjunct { + let var = lit.unsigned_abs() as usize; + assert!( + var >= 1 && var <= num_vars, + "Disjunct {} contains literal {} referencing variable {} outside range [1, {}]", + i, + lit, + var, + num_vars + ); + } + } + Self { + num_vars, + disjuncts, + } + } + + /// Get the number of variables. + pub fn num_vars(&self) -> usize { + self.num_vars + } + + /// Get the number of disjuncts. + pub fn num_disjuncts(&self) -> usize { + self.disjuncts.len() + } + + /// Get the disjuncts. + pub fn disjuncts(&self) -> &[Vec] { + &self.disjuncts + } + + /// Check if a literal is true under the given assignment. + fn literal_is_true(lit: i32, assignment: &[bool]) -> bool { + let var = lit.unsigned_abs() as usize - 1; + let value = assignment.get(var).copied().unwrap_or(false); + if lit > 0 { + value + } else { + !value + } + } + + /// Check if all disjuncts are false (the formula evaluates to false). + /// + /// A disjunct (conjunction of literals) is true iff ALL its literals are true. + /// The DNF formula is false iff ALL disjuncts are false, i.e., each disjunct + /// has at least one false literal. + pub fn is_falsifying(&self, assignment: &[bool]) -> bool { + self.disjuncts.iter().all(|disjunct| { + // A disjunct is false if at least one literal is false + !disjunct + .iter() + .all(|&lit| Self::literal_is_true(lit, assignment)) + }) + } + + /// Convert a usize config to boolean assignment. + fn config_to_assignment(config: &[usize]) -> Vec { + config.iter().map(|&v| v == 1).collect() + } +} + +impl Problem for NonTautology { + const NAME: &'static str = "NonTautology"; + type Value = crate::types::Or; + + fn dims(&self) -> Vec { + vec![2; self.num_vars] + } + + fn evaluate(&self, config: &[usize]) -> crate::types::Or { + crate::types::Or({ + let assignment = Self::config_to_assignment(config); + self.is_falsifying(&assignment) + }) + } + + fn variant() -> Vec<(&'static str, &'static str)> { + crate::variant_params![] + } +} + +crate::declare_variants! { + default NonTautology => "1.307^num_variables", +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_model_example_specs() -> Vec { + vec![crate::example_db::specs::ModelExampleSpec { + id: "non_tautology", + instance: Box::new(NonTautology::new(3, vec![vec![1, 2, 3], vec![-1, -2, -3]])), + optimal_config: vec![1, 0, 0], + optimal_value: serde_json::json!(true), + }] +} + +#[cfg(test)] +#[path = "../../unit_tests/models/formula/non_tautology.rs"] +mod tests; diff --git a/src/models/formula/one_in_three_satisfiability.rs b/src/models/formula/one_in_three_satisfiability.rs new file mode 100644 index 00000000..7feb35ad --- /dev/null +++ b/src/models/formula/one_in_three_satisfiability.rs @@ -0,0 +1,183 @@ +//! One-in-Three Satisfiability (1-in-3 SAT) problem implementation. +//! +//! 1-in-3 SAT is a variant of 3-SAT where each clause must have *exactly one* +//! true literal (rather than *at least one*). This stronger constraint makes +//! the problem NP-complete even without negations (monotone 1-in-3 SAT). + +use crate::registry::{FieldInfo, ProblemSchemaEntry}; +use crate::traits::Problem; +use serde::{Deserialize, Serialize}; + +use super::CNFClause; + +inventory::submit! { + ProblemSchemaEntry { + name: "OneInThreeSatisfiability", + display_name: "One-in-Three Satisfiability", + aliases: &[], + dimensions: &[], + module_path: module_path!(), + description: "3-SAT variant where each clause has exactly one true literal", + fields: &[ + FieldInfo { name: "num_vars", type_name: "usize", description: "Number of Boolean variables" }, + FieldInfo { name: "clauses", type_name: "Vec", description: "Clauses each with exactly 3 literals" }, + ], + } +} + +/// One-in-Three Satisfiability problem. +/// +/// Given a CNF formula where each clause has exactly 3 literals, find a truth +/// assignment such that each clause has *exactly one* true literal. +/// +/// This is a well-known NP-complete problem introduced by Schaefer (1978). +/// Unlike standard 3-SAT which requires at least one true literal per clause, +/// 1-in-3 SAT requires exactly one. +/// +/// # Example +/// +/// ``` +/// use problemreductions::models::formula::{OneInThreeSatisfiability, CNFClause}; +/// use problemreductions::{Problem, Solver, BruteForce}; +/// +/// // (x1 OR x2 OR x3) AND (NOT x1 OR x3 OR x4) AND (x2 OR NOT x3 OR NOT x4) +/// let problem = OneInThreeSatisfiability::new( +/// 4, +/// vec![ +/// CNFClause::new(vec![1, 2, 3]), +/// CNFClause::new(vec![-1, 3, 4]), +/// CNFClause::new(vec![2, -3, -4]), +/// ], +/// ); +/// +/// let solver = BruteForce::new(); +/// let solution = solver.find_witness(&problem); +/// assert!(solution.is_some()); +/// ``` +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct OneInThreeSatisfiability { + /// Number of variables. + num_vars: usize, + /// Clauses in CNF, each with exactly 3 literals. + clauses: Vec, +} + +impl OneInThreeSatisfiability { + /// Create a new 1-in-3 SAT problem. + /// + /// # Panics + /// Panics if any clause does not have exactly 3 literals, or if any + /// literal references a variable outside the range [1, num_vars]. + pub fn new(num_vars: usize, clauses: Vec) -> Self { + for (i, clause) in clauses.iter().enumerate() { + assert!( + clause.len() == 3, + "Clause {} has {} literals, expected 3", + i, + clause.len() + ); + for &lit in &clause.literals { + let var = lit.unsigned_abs() as usize; + assert!( + var >= 1 && var <= num_vars, + "Clause {} contains literal {} referencing variable {} outside range [1, {}]", + i, + lit, + var, + num_vars + ); + } + } + Self { num_vars, clauses } + } + + /// Get the number of variables. + pub fn num_vars(&self) -> usize { + self.num_vars + } + + /// Get the number of clauses. + pub fn num_clauses(&self) -> usize { + self.clauses.len() + } + + /// Get the clauses. + pub fn clauses(&self) -> &[CNFClause] { + &self.clauses + } + + /// Get a specific clause. + pub fn get_clause(&self, index: usize) -> Option<&CNFClause> { + self.clauses.get(index) + } + + /// Check if exactly one literal is true in each clause. + pub fn is_one_in_three_satisfying(&self, assignment: &[bool]) -> bool { + self.clauses.iter().all(|clause| { + let true_count = clause + .literals + .iter() + .filter(|&&lit| { + let var = lit.unsigned_abs() as usize - 1; // Convert to 0-indexed + let value = assignment.get(var).copied().unwrap_or(false); + if lit > 0 { + value + } else { + !value + } + }) + .count(); + true_count == 1 + }) + } + + /// Convert a usize config to boolean assignment. + fn config_to_assignment(config: &[usize]) -> Vec { + config.iter().map(|&v| v == 1).collect() + } +} + +impl Problem for OneInThreeSatisfiability { + const NAME: &'static str = "OneInThreeSatisfiability"; + type Value = crate::types::Or; + + fn dims(&self) -> Vec { + vec![2; self.num_vars] + } + + fn evaluate(&self, config: &[usize]) -> crate::types::Or { + crate::types::Or({ + let assignment = Self::config_to_assignment(config); + self.is_one_in_three_satisfying(&assignment) + }) + } + + fn variant() -> Vec<(&'static str, &'static str)> { + crate::variant_params![] + } +} + +crate::declare_variants! { + default OneInThreeSatisfiability => "1.307^num_variables", +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_model_example_specs() -> Vec { + vec![crate::example_db::specs::ModelExampleSpec { + id: "one_in_three_satisfiability", + instance: Box::new(OneInThreeSatisfiability::new( + 4, + vec![ + CNFClause::new(vec![1, 2, 3]), + CNFClause::new(vec![-1, 3, 4]), + CNFClause::new(vec![2, -3, -4]), + ], + )), + optimal_config: vec![1, 0, 0, 1], + optimal_value: serde_json::json!(true), + }] +} + +#[cfg(test)] +#[path = "../../unit_tests/models/formula/one_in_three_satisfiability.rs"] +mod tests; diff --git a/src/models/formula/planar_3_satisfiability.rs b/src/models/formula/planar_3_satisfiability.rs new file mode 100644 index 00000000..a3992f54 --- /dev/null +++ b/src/models/formula/planar_3_satisfiability.rs @@ -0,0 +1,180 @@ +//! Planar 3-Satisfiability (Planar 3-SAT) problem implementation. +//! +//! Planar 3-SAT is a restricted variant of 3-SAT where the variable-clause +//! incidence graph is planar. Each clause has exactly 3 literals. This +//! restriction preserves NP-completeness while enabling reductions to +//! geometric and planar problems. + +use crate::registry::{FieldInfo, ProblemSchemaEntry}; +use crate::traits::Problem; +use serde::{Deserialize, Serialize}; + +use super::CNFClause; + +inventory::submit! { + ProblemSchemaEntry { + name: "Planar3Satisfiability", + display_name: "Planar 3-Satisfiability", + aliases: &[], + dimensions: &[], + module_path: module_path!(), + description: "3-SAT with planar variable-clause incidence graph", + fields: &[ + FieldInfo { name: "num_vars", type_name: "usize", description: "Number of Boolean variables" }, + FieldInfo { name: "clauses", type_name: "Vec", description: "Clauses each with exactly 3 literals" }, + ], + } +} + +/// Planar 3-Satisfiability problem. +/// +/// Given a 3-CNF formula where each clause has exactly 3 literals and the +/// variable-clause incidence graph is planar, find a satisfying assignment. +/// +/// The incidence graph H(F) is a bipartite graph with variable nodes and +/// clause nodes, where an edge connects variable v to clause C if v appears +/// (positively or negatively) in C. The formula is a valid Planar 3-SAT +/// instance if H(F) is planar. +/// +/// **Note:** Planarity of the incidence graph is NOT validated at construction +/// time. Only the clause width (exactly 3 literals) and variable index range +/// are validated. This is analogous to how `PlanarGraph` does not explicitly +/// validate planarity in this codebase. +/// +/// # Example +/// +/// ``` +/// use problemreductions::models::formula::{Planar3Satisfiability, CNFClause}; +/// use problemreductions::{Problem, Solver, BruteForce}; +/// +/// // Formula: (x1 OR x2 OR x3) AND (NOT x1 OR x2 OR x4) +/// // AND (x1 OR NOT x3 OR x4) AND (NOT x2 OR x3 OR NOT x4) +/// let problem = Planar3Satisfiability::new( +/// 4, +/// vec![ +/// CNFClause::new(vec![1, 2, 3]), +/// CNFClause::new(vec![-1, 2, 4]), +/// CNFClause::new(vec![1, -3, 4]), +/// CNFClause::new(vec![-2, 3, -4]), +/// ], +/// ); +/// +/// let solver = BruteForce::new(); +/// let solution = solver.find_witness(&problem); +/// assert!(solution.is_some()); +/// ``` +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct Planar3Satisfiability { + /// Number of variables. + num_vars: usize, + /// Clauses in CNF, each with exactly 3 literals. + clauses: Vec, +} + +impl Planar3Satisfiability { + /// Create a new Planar 3-SAT problem. + /// + /// # Panics + /// Panics if any clause does not have exactly 3 literals, or if any + /// literal references a variable outside the range [1, num_vars]. + /// + /// **Note:** Planarity of the incidence graph is not checked. + pub fn new(num_vars: usize, clauses: Vec) -> Self { + for (i, clause) in clauses.iter().enumerate() { + assert!( + clause.len() == 3, + "Clause {} has {} literals, expected 3", + i, + clause.len() + ); + for &lit in &clause.literals { + let var = lit.unsigned_abs() as usize; + assert!( + var >= 1 && var <= num_vars, + "Clause {} contains literal {} referencing variable {} outside range [1, {}]", + i, + lit, + var, + num_vars + ); + } + } + Self { num_vars, clauses } + } + + /// Get the number of variables. + pub fn num_vars(&self) -> usize { + self.num_vars + } + + /// Get the number of clauses. + pub fn num_clauses(&self) -> usize { + self.clauses.len() + } + + /// Get the clauses. + pub fn clauses(&self) -> &[CNFClause] { + &self.clauses + } + + /// Get a specific clause. + pub fn get_clause(&self, index: usize) -> Option<&CNFClause> { + self.clauses.get(index) + } + + /// Check if an assignment satisfies all clauses. + pub fn is_satisfying(&self, assignment: &[bool]) -> bool { + self.clauses.iter().all(|c| c.is_satisfied(assignment)) + } + + /// Convert a usize config to boolean assignment. + fn config_to_assignment(config: &[usize]) -> Vec { + config.iter().map(|&v| v == 1).collect() + } +} + +impl Problem for Planar3Satisfiability { + const NAME: &'static str = "Planar3Satisfiability"; + type Value = crate::types::Or; + + fn dims(&self) -> Vec { + vec![2; self.num_vars] + } + + fn evaluate(&self, config: &[usize]) -> crate::types::Or { + crate::types::Or({ + let assignment = Self::config_to_assignment(config); + self.is_satisfying(&assignment) + }) + } + + fn variant() -> Vec<(&'static str, &'static str)> { + crate::variant_params![] + } +} + +crate::declare_variants! { + default Planar3Satisfiability => "1.307^num_variables", +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_model_example_specs() -> Vec { + vec![crate::example_db::specs::ModelExampleSpec { + id: "planar_3_satisfiability", + instance: Box::new(Planar3Satisfiability::new( + 4, + vec![ + CNFClause::new(vec![1, 2, 3]), + CNFClause::new(vec![-1, 2, 4]), + CNFClause::new(vec![1, -3, 4]), + CNFClause::new(vec![-2, 3, -4]), + ], + )), + optimal_config: vec![1, 1, 1, 0], + optimal_value: serde_json::json!(true), + }] +} + +#[cfg(test)] +#[path = "../../unit_tests/models/formula/planar_3_satisfiability.rs"] +mod tests; diff --git a/src/models/graph/bounded_diameter_spanning_tree.rs b/src/models/graph/bounded_diameter_spanning_tree.rs new file mode 100644 index 00000000..217e203b --- /dev/null +++ b/src/models/graph/bounded_diameter_spanning_tree.rs @@ -0,0 +1,310 @@ +//! Bounded Diameter Spanning Tree problem implementation. +//! +//! Given a graph G = (V, E) with edge weights, a weight bound B, and a diameter +//! bound D, determine whether G has a spanning tree with total weight at most B +//! and diameter (longest shortest path in edges) at most D. + +use crate::registry::{FieldInfo, ProblemSchemaEntry, VariantDimension}; +use crate::topology::{Graph, SimpleGraph}; +use crate::traits::Problem; +use crate::types::WeightElement; +use crate::variant::VariantParam; +use num_traits::Zero; +use serde::{Deserialize, Serialize}; +use std::collections::VecDeque; + +inventory::submit! { + ProblemSchemaEntry { + name: "BoundedDiameterSpanningTree", + display_name: "Bounded Diameter Spanning Tree", + aliases: &[], + dimensions: &[ + VariantDimension::new("graph", "SimpleGraph", &["SimpleGraph"]), + VariantDimension::new("weight", "i32", &["i32"]), + ], + module_path: module_path!(), + description: "Does G have a spanning tree with total weight <= B and diameter <= D?", + fields: &[ + FieldInfo { name: "graph", type_name: "G", description: "The underlying graph G=(V,E)" }, + FieldInfo { name: "edge_weights", type_name: "Vec", description: "Edge weights w: E -> ZZ_(> 0)" }, + FieldInfo { name: "weight_bound", type_name: "W::Sum", description: "Upper bound B on total tree weight" }, + FieldInfo { name: "diameter_bound", type_name: "usize", description: "Upper bound D on tree diameter (in edges)" }, + ], + } +} + +/// Bounded Diameter Spanning Tree problem. +/// +/// Given an undirected graph G = (V, E) with positive edge weights w(e), a +/// weight bound B, and a diameter bound D, determine whether G contains a +/// spanning tree T such that the total weight of T is at most B and the +/// diameter of T (the longest shortest path measured in number of edges) is +/// at most D. +/// +/// Each configuration entry corresponds to an edge (in the order returned by +/// `graph.edges()`), with value 0 (not selected) or 1 (selected). +/// +/// # Type Parameters +/// +/// * `G` - Graph type (e.g., SimpleGraph) +/// * `W` - Edge weight type (e.g., i32) +/// +/// # Example +/// +/// ``` +/// use problemreductions::models::graph::BoundedDiameterSpanningTree; +/// use problemreductions::topology::SimpleGraph; +/// use problemreductions::{Problem, Solver, BruteForce}; +/// +/// let graph = SimpleGraph::new(5, vec![(0,1),(0,2),(0,3),(1,2),(1,4),(2,3),(3,4)]); +/// let problem = BoundedDiameterSpanningTree::new(graph, vec![1,2,1,1,2,1,1], 5, 3); +/// +/// let solver = BruteForce::new(); +/// let solution = solver.find_witness(&problem); +/// assert!(solution.is_some()); +/// ``` +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(bound( + deserialize = "G: serde::Deserialize<'de>, W: serde::Deserialize<'de>, W::Sum: serde::Deserialize<'de>" +))] +pub struct BoundedDiameterSpanningTree { + /// The underlying graph. + graph: G, + /// Weight for each edge in graph-edge order. + edge_weights: Vec, + /// Upper bound B on total tree weight. + weight_bound: W::Sum, + /// Upper bound D on tree diameter (in edges). + diameter_bound: usize, + /// Ordered edge list (mirrors `graph.edges()` order). + edge_list: Vec<(usize, usize)>, +} + +impl BoundedDiameterSpanningTree { + /// Create a new Bounded Diameter Spanning Tree instance. + /// + /// # Panics + /// Panics if `edge_weights` length does not match the graph's edge count, + /// if any edge weight is not positive, or if `diameter_bound` is zero. + pub fn new( + graph: G, + edge_weights: Vec, + weight_bound: W::Sum, + diameter_bound: usize, + ) -> Self { + assert_eq!( + edge_weights.len(), + graph.num_edges(), + "edge_weights length must match num_edges" + ); + let zero = W::Sum::zero(); + assert!( + edge_weights.iter().all(|w| w.to_sum() > zero.clone()), + "All edge weights must be positive (> 0)" + ); + assert!(weight_bound > zero, "weight_bound must be positive (> 0)"); + assert!(diameter_bound >= 1, "diameter_bound must be at least 1"); + let edge_list = graph.edges(); + Self { + graph, + edge_weights, + weight_bound, + diameter_bound, + edge_list, + } + } + + /// Get a reference to the underlying graph. + pub fn graph(&self) -> &G { + &self.graph + } + + /// Get the edge weights. + pub fn edge_weights(&self) -> &[W] { + &self.edge_weights + } + + /// Set new edge weights. + pub fn set_weights(&mut self, edge_weights: Vec) { + assert_eq!( + edge_weights.len(), + self.graph.num_edges(), + "edge_weights length must match num_edges" + ); + let zero = W::Sum::zero(); + assert!( + edge_weights.iter().all(|w| w.to_sum() > zero.clone()), + "All edge weights must be positive (> 0)" + ); + self.edge_weights = edge_weights; + } + + /// Get the weight bound B. + pub fn weight_bound(&self) -> &W::Sum { + &self.weight_bound + } + + /// Get the diameter bound D. + pub fn diameter_bound(&self) -> usize { + self.diameter_bound + } + + /// Get the number of vertices in the underlying graph. + pub fn num_vertices(&self) -> usize { + self.graph.num_vertices() + } + + /// Get the number of edges in the underlying graph. + pub fn num_edges(&self) -> usize { + self.graph.num_edges() + } + + /// Get the ordered edge list. + pub fn edge_list(&self) -> &[(usize, usize)] { + &self.edge_list + } + + /// Check whether this problem uses a non-unit weight type. + pub fn is_weighted(&self) -> bool { + !W::IS_UNIT + } + + /// Compute the diameter of a tree given its adjacency list. + /// The diameter is the length (in number of edges) of the longest shortest + /// path between any two vertices in the tree. + fn tree_diameter(adj: &[Vec], n: usize) -> usize { + let mut max_dist = 0; + for start in 0..n { + if adj[start].is_empty() { + continue; + } + let mut dist = vec![usize::MAX; n]; + dist[start] = 0; + let mut queue = VecDeque::new(); + queue.push_back(start); + while let Some(v) = queue.pop_front() { + for &u in &adj[v] { + if dist[u] == usize::MAX { + dist[u] = dist[v] + 1; + if dist[u] > max_dist { + max_dist = dist[u]; + } + queue.push_back(u); + } + } + } + } + max_dist + } +} + +impl Problem for BoundedDiameterSpanningTree +where + G: Graph + VariantParam, + W: WeightElement + VariantParam, +{ + const NAME: &'static str = "BoundedDiameterSpanningTree"; + type Value = crate::types::Or; + + fn variant() -> Vec<(&'static str, &'static str)> { + crate::variant_params![G, W] + } + + fn dims(&self) -> Vec { + vec![2; self.edge_list.len()] + } + + fn evaluate(&self, config: &[usize]) -> crate::types::Or { + crate::types::Or({ + let n = self.graph.num_vertices(); + if config.len() != self.edge_list.len() { + return crate::types::Or(false); + } + + // Collect selected edges + let selected_indices: Vec = config + .iter() + .enumerate() + .filter(|(_, &v)| v == 1) + .map(|(i, _)| i) + .collect(); + + // A spanning tree on n vertices must have exactly n-1 edges + if n == 0 { + return crate::types::Or(selected_indices.is_empty()); + } + if selected_indices.len() != n - 1 { + return crate::types::Or(false); + } + + // Build adjacency list and compute total weight + let mut adj: Vec> = vec![Vec::new(); n]; + let mut total_weight = W::Sum::zero(); + for &idx in &selected_indices { + let (u, v) = self.edge_list[idx]; + adj[u].push(v); + adj[v].push(u); + total_weight += self.edge_weights[idx].to_sum(); + } + + // Check weight bound + if total_weight > self.weight_bound.clone() { + return crate::types::Or(false); + } + + // Check connectivity using BFS + let mut visited = vec![false; n]; + let mut queue = VecDeque::new(); + visited[0] = true; + queue.push_back(0); + let mut count = 1; + while let Some(v) = queue.pop_front() { + for &u in &adj[v] { + if !visited[u] { + visited[u] = true; + count += 1; + queue.push_back(u); + } + } + } + + if count != n { + return crate::types::Or(false); + } + + // Check diameter bound (BFS from each vertex) + let diameter = Self::tree_diameter(&adj, n); + diameter <= self.diameter_bound + }) + } +} + +crate::declare_variants! { + default BoundedDiameterSpanningTree => "num_vertices ^ num_vertices", +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_model_example_specs() -> Vec { + // 5 vertices, 7 edges with weights: (0,1,1),(0,2,2),(0,3,1),(1,2,1),(1,4,2),(2,3,1),(3,4,1) + // B=5, D=3 + // Tree: edges (0,1),(0,3),(2,3),(3,4) → edge indices 0,2,5,6 + // Config: [1,0,1,0,0,1,1] → weight = 1+1+1+1 = 4 ≤ 5, diameter = 3 ≤ 3 + vec![crate::example_db::specs::ModelExampleSpec { + id: "bounded_diameter_spanning_tree_simplegraph_i32", + instance: Box::new(BoundedDiameterSpanningTree::new( + SimpleGraph::new( + 5, + vec![(0, 1), (0, 2), (0, 3), (1, 2), (1, 4), (2, 3), (3, 4)], + ), + vec![1, 2, 1, 1, 2, 1, 1], + 5, + 3, + )), + optimal_config: vec![1, 0, 1, 0, 0, 1, 1], + optimal_value: serde_json::json!(true), + }] +} + +#[cfg(test)] +#[path = "../../unit_tests/models/graph/bounded_diameter_spanning_tree.rs"] +mod tests; diff --git a/src/models/graph/degree_constrained_spanning_tree.rs b/src/models/graph/degree_constrained_spanning_tree.rs new file mode 100644 index 00000000..47338a8f --- /dev/null +++ b/src/models/graph/degree_constrained_spanning_tree.rs @@ -0,0 +1,209 @@ +//! Degree-Constrained Spanning Tree problem implementation. +//! +//! Given a graph G = (V, E) and a positive integer K, determine whether G has +//! a spanning tree in which every vertex has degree at most K. + +use crate::registry::{FieldInfo, ProblemSchemaEntry, VariantDimension}; +use crate::topology::{Graph, SimpleGraph}; +use crate::traits::Problem; +use crate::variant::VariantParam; +use serde::{Deserialize, Serialize}; +use std::collections::VecDeque; + +inventory::submit! { + ProblemSchemaEntry { + name: "DegreeConstrainedSpanningTree", + display_name: "Degree-Constrained Spanning Tree", + aliases: &[], + dimensions: &[ + VariantDimension::new("graph", "SimpleGraph", &["SimpleGraph"]), + ], + module_path: module_path!(), + description: "Does G have a spanning tree with maximum vertex degree at most K?", + fields: &[ + FieldInfo { name: "graph", type_name: "G", description: "The underlying graph G=(V,E)" }, + FieldInfo { name: "max_degree", type_name: "usize", description: "max_degree: maximum allowed vertex degree K (>= 1)" }, + ], + } +} + +/// Degree-Constrained Spanning Tree problem. +/// +/// Given an undirected graph G = (V, E) and a positive integer K, determine +/// whether G contains a spanning tree T such that every vertex in T has degree +/// at most K. +/// +/// Each configuration entry corresponds to an edge (in the order returned by +/// `graph.edges()`), with value 0 (not selected) or 1 (selected). +/// +/// # Type Parameters +/// +/// * `G` - Graph type (e.g., SimpleGraph) +/// +/// # Example +/// +/// ``` +/// use problemreductions::models::graph::DegreeConstrainedSpanningTree; +/// use problemreductions::topology::SimpleGraph; +/// use problemreductions::{Problem, Solver, BruteForce}; +/// +/// let graph = SimpleGraph::new(4, vec![(0,1),(1,2),(2,3),(0,3)]); +/// let problem = DegreeConstrainedSpanningTree::new(graph, 2); +/// +/// let solver = BruteForce::new(); +/// let solution = solver.find_witness(&problem); +/// assert!(solution.is_some()); +/// ``` +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(bound(deserialize = "G: serde::Deserialize<'de>"))] +pub struct DegreeConstrainedSpanningTree { + /// The underlying graph. + graph: G, + /// Maximum allowed vertex degree in the spanning tree. + max_degree: usize, + /// Ordered edge list (mirrors `graph.edges()` order). + edge_list: Vec<(usize, usize)>, +} + +impl DegreeConstrainedSpanningTree { + /// Create a new Degree-Constrained Spanning Tree instance. + /// + /// # Panics + /// Panics if `max_degree` is zero. + pub fn new(graph: G, max_degree: usize) -> Self { + assert!(max_degree >= 1, "max_degree must be at least 1"); + let edge_list = graph.edges(); + Self { + graph, + max_degree, + edge_list, + } + } + + /// Get a reference to the underlying graph. + pub fn graph(&self) -> &G { + &self.graph + } + + /// Get the max_degree parameter K. + pub fn max_degree(&self) -> usize { + self.max_degree + } + + /// Get the number of vertices in the underlying graph. + pub fn num_vertices(&self) -> usize { + self.graph.num_vertices() + } + + /// Get the number of edges in the underlying graph. + pub fn num_edges(&self) -> usize { + self.graph.num_edges() + } + + /// Get the ordered edge list. + pub fn edge_list(&self) -> &[(usize, usize)] { + &self.edge_list + } +} + +impl Problem for DegreeConstrainedSpanningTree +where + G: Graph + VariantParam, +{ + const NAME: &'static str = "DegreeConstrainedSpanningTree"; + type Value = crate::types::Or; + + fn variant() -> Vec<(&'static str, &'static str)> { + crate::variant_params![G] + } + + fn dims(&self) -> Vec { + vec![2; self.edge_list.len()] + } + + fn evaluate(&self, config: &[usize]) -> crate::types::Or { + crate::types::Or({ + let n = self.graph.num_vertices(); + if config.len() != self.edge_list.len() { + return crate::types::Or(false); + } + + // Collect selected edges + let selected: Vec<(usize, usize)> = config + .iter() + .enumerate() + .filter(|(_, &v)| v == 1) + .map(|(i, _)| self.edge_list[i]) + .collect(); + + // A spanning tree on n vertices must have exactly n-1 edges + if n == 0 { + return crate::types::Or(selected.is_empty()); + } + if selected.len() != n - 1 { + return crate::types::Or(false); + } + + // Check connectivity using BFS on selected edges + let mut adj: Vec> = vec![Vec::new(); n]; + let mut degree = vec![0usize; n]; + for &(u, v) in &selected { + adj[u].push(v); + adj[v].push(u); + degree[u] += 1; + degree[v] += 1; + } + + // Check max degree constraint + if degree.iter().any(|&d| d > self.max_degree) { + return crate::types::Or(false); + } + + // BFS to check connectivity + let mut visited = vec![false; n]; + let mut queue = VecDeque::new(); + visited[0] = true; + queue.push_back(0); + let mut count = 1; + while let Some(v) = queue.pop_front() { + for &u in &adj[v] { + if !visited[u] { + visited[u] = true; + count += 1; + queue.push_back(u); + } + } + } + + count == n + }) + } +} + +crate::declare_variants! { + default DegreeConstrainedSpanningTree => "2^num_vertices", +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_model_example_specs() -> Vec { + // 5 vertices, 7 edges: (0,1),(0,2),(0,3),(1,2),(1,4),(2,3),(3,4), K=2 + // Spanning tree with max degree 2: edges (0,2),(0,3),(1,2),(1,4) + // indices: 1,2,3,4 → config [0,1,1,1,1,0,0] + // Degrees: 0→{2,3}=2, 1→{2,4}=2, 2→{0,1}=2, 3→{0}=1, 4→{1}=1 + vec![crate::example_db::specs::ModelExampleSpec { + id: "degree_constrained_spanning_tree_simplegraph", + instance: Box::new(DegreeConstrainedSpanningTree::new( + SimpleGraph::new( + 5, + vec![(0, 1), (0, 2), (0, 3), (1, 2), (1, 4), (2, 3), (3, 4)], + ), + 2, + )), + optimal_config: vec![0, 1, 1, 1, 1, 0, 0], + optimal_value: serde_json::json!(true), + }] +} + +#[cfg(test)] +#[path = "../../unit_tests/models/graph/degree_constrained_spanning_tree.rs"] +mod tests; diff --git a/src/models/graph/kernel.rs b/src/models/graph/kernel.rs new file mode 100644 index 00000000..72b3e1b5 --- /dev/null +++ b/src/models/graph/kernel.rs @@ -0,0 +1,153 @@ +//! Kernel problem implementation. +//! +//! The Kernel problem asks whether a directed graph contains a kernel, i.e., +//! a subset of vertices that is both independent (no arc between any two +//! selected vertices) and absorbing (every unselected vertex has an arc to +//! some selected vertex). + +use crate::registry::{FieldInfo, ProblemSchemaEntry, VariantDimension}; +use crate::topology::DirectedGraph; +use crate::traits::Problem; +use serde::{Deserialize, Serialize}; + +inventory::submit! { + ProblemSchemaEntry { + name: "Kernel", + display_name: "Kernel", + aliases: &[], + dimensions: &[ + VariantDimension::new("graph", "DirectedGraph", &["DirectedGraph"]), + ], + module_path: module_path!(), + description: "Does the directed graph contain a kernel (independent and absorbing vertex subset)?", + fields: &[ + FieldInfo { name: "graph", type_name: "DirectedGraph", description: "The directed graph G=(V,A)" }, + ], + } +} + +/// The Kernel problem. +/// +/// Given a directed graph G = (V, A), find a kernel V' ⊆ V such that: +/// 1. **Independence:** no two vertices in V' are joined by an arc (neither +/// (u,v) nor (v,u) is in A for any u,v ∈ V'). +/// 2. **Absorption:** every vertex u ∉ V' has an arc to some vertex v ∈ V' +/// (i.e., (u,v) ∈ A). +/// +/// # Representation +/// +/// A configuration is a binary vector of length |V|, where `config[v] = 1` +/// means vertex v is selected into V'. +/// +/// # Example +/// +/// ``` +/// use problemreductions::models::graph::Kernel; +/// use problemreductions::topology::DirectedGraph; +/// use problemreductions::{Problem, Solver, BruteForce}; +/// +/// let graph = DirectedGraph::new(5, vec![ +/// (0,1),(0,2),(1,3),(2,3),(3,4),(4,0),(4,1), +/// ]); +/// let problem = Kernel::new(graph); +/// let solver = BruteForce::new(); +/// let solution = solver.find_witness(&problem); +/// assert!(solution.is_some()); +/// ``` +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct Kernel { + graph: DirectedGraph, +} + +impl Kernel { + /// Create a new Kernel problem from a directed graph. + pub fn new(graph: DirectedGraph) -> Self { + Self { graph } + } + + /// Get a reference to the underlying directed graph. + pub fn graph(&self) -> &DirectedGraph { + &self.graph + } + + /// Get the number of vertices in the directed graph. + pub fn num_vertices(&self) -> usize { + self.graph.num_vertices() + } + + /// Get the number of arcs in the directed graph. + pub fn num_arcs(&self) -> usize { + self.graph.num_arcs() + } +} + +impl Problem for Kernel { + const NAME: &'static str = "Kernel"; + type Value = crate::types::Or; + + fn variant() -> Vec<(&'static str, &'static str)> { + crate::variant_params![] + } + + fn dims(&self) -> Vec { + vec![2; self.graph.num_vertices()] + } + + fn evaluate(&self, config: &[usize]) -> crate::types::Or { + let n = self.graph.num_vertices(); + + // Collect selected vertices + let selected: Vec = config.iter().map(|&c| c == 1).collect(); + + // Independence: no arc between any two selected vertices + for u in 0..n { + if !selected[u] { + continue; + } + // Check that no successor of u is also selected + for &v in &self.graph.successors(u) { + if selected[v] { + return crate::types::Or(false); + } + } + } + + // Absorption: every unselected vertex must have an arc to some selected vertex + for u in 0..n { + if selected[u] { + continue; + } + let has_arc_to_selected = self.graph.successors(u).iter().any(|&v| selected[v]); + if !has_arc_to_selected { + return crate::types::Or(false); + } + } + + crate::types::Or(true) + } +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_model_example_specs() -> Vec { + // 5 vertices, arcs: (0,1),(0,2),(1,3),(2,3),(3,4),(4,0),(4,1) + // Kernel: V' = {0, 3} → config [1,0,0,1,0] + let graph = DirectedGraph::new( + 5, + vec![(0, 1), (0, 2), (1, 3), (2, 3), (3, 4), (4, 0), (4, 1)], + ); + let optimal_config = vec![1, 0, 0, 1, 0]; + vec![crate::example_db::specs::ModelExampleSpec { + id: "kernel", + instance: Box::new(Kernel::new(graph)), + optimal_config, + optimal_value: serde_json::json!(true), + }] +} + +crate::declare_variants! { + default Kernel => "2^num_vertices", +} + +#[cfg(test)] +#[path = "../../unit_tests/models/graph/kernel.rs"] +mod tests; diff --git a/src/models/graph/maximum_achromatic_number.rs b/src/models/graph/maximum_achromatic_number.rs new file mode 100644 index 00000000..b45aa0df --- /dev/null +++ b/src/models/graph/maximum_achromatic_number.rs @@ -0,0 +1,179 @@ +//! Maximum Achromatic Number problem implementation. +//! +//! Given a graph G = (V, E), find a proper coloring that uses the maximum +//! number of colors such that the coloring is also complete: for every pair +//! of distinct colors, there exists an edge connecting a vertex of one color +//! to a vertex of the other. + +use crate::registry::{FieldInfo, ProblemSchemaEntry, VariantDimension}; +use crate::topology::{Graph, SimpleGraph}; +use crate::traits::Problem; +use crate::types::Max; +use serde::{Deserialize, Serialize}; +use std::collections::HashSet; + +inventory::submit! { + ProblemSchemaEntry { + name: "MaximumAchromaticNumber", + display_name: "Maximum Achromatic Number", + aliases: &[], + dimensions: &[ + VariantDimension::new("graph", "SimpleGraph", &["SimpleGraph"]), + ], + module_path: module_path!(), + description: "Find a complete proper coloring maximizing the number of colors", + fields: &[ + FieldInfo { name: "graph", type_name: "G", description: "The underlying graph G=(V,E)" }, + ], + } +} + +/// The Maximum Achromatic Number problem. +/// +/// Given a graph G = (V, E), find a proper coloring of the vertices using the +/// maximum number of colors such that the coloring is *complete*: for every +/// pair of distinct colors used, there exists at least one edge between a +/// vertex of one color and a vertex of the other. +/// +/// Variables: one per vertex, each selecting a color class (0..n-1). +/// +/// # Type Parameters +/// +/// * `G` - The graph type (e.g., `SimpleGraph`) +/// +/// # Example +/// +/// ``` +/// use problemreductions::models::graph::MaximumAchromaticNumber; +/// use problemreductions::topology::SimpleGraph; +/// use problemreductions::{Problem, Solver, BruteForce}; +/// +/// // C6: achromatic number is 3 +/// let graph = SimpleGraph::new(6, vec![(0,1),(1,2),(2,3),(3,4),(4,5),(5,0)]); +/// let problem = MaximumAchromaticNumber::new(graph); +/// +/// let solver = BruteForce::new(); +/// let solution = solver.find_witness(&problem).unwrap(); +/// let value = problem.evaluate(&solution); +/// assert_eq!(value, problemreductions::types::Max(Some(3))); +/// ``` +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct MaximumAchromaticNumber { + /// The underlying graph. + graph: G, +} + +impl MaximumAchromaticNumber { + /// Create a MaximumAchromaticNumber problem from a graph. + pub fn new(graph: G) -> Self { + Self { graph } + } + + /// Get a reference to the underlying graph. + pub fn graph(&self) -> &G { + &self.graph + } + + /// Get the number of vertices in the underlying graph. + pub fn num_vertices(&self) -> usize { + self.graph.num_vertices() + } + + /// Get the number of edges in the underlying graph. + pub fn num_edges(&self) -> usize { + self.graph.num_edges() + } + + /// Check whether a configuration is a proper coloring. + /// + /// A proper coloring assigns colors to vertices such that no two adjacent + /// vertices share the same color. + pub fn is_proper_coloring(&self, config: &[usize]) -> bool { + for (u, v) in self.graph.edges() { + if config[u] == config[v] { + return false; + } + } + true + } + + /// Check whether a proper coloring is complete. + /// + /// A coloring is complete if for every pair of distinct colors used, + /// there exists an edge between a vertex of one color and a vertex + /// of the other. + pub fn is_complete_coloring(&self, config: &[usize]) -> bool { + let used_colors: HashSet = config.iter().copied().collect(); + let colors: Vec = used_colors.into_iter().collect(); + + for i in 0..colors.len() { + for j in (i + 1)..colors.len() { + let c1 = colors[i]; + let c2 = colors[j]; + let has_edge = self.graph.edges().iter().any(|&(u, v)| { + (config[u] == c1 && config[v] == c2) || (config[u] == c2 && config[v] == c1) + }); + if !has_edge { + return false; + } + } + } + true + } +} + +impl Problem for MaximumAchromaticNumber +where + G: Graph + crate::variant::VariantParam, +{ + const NAME: &'static str = "MaximumAchromaticNumber"; + type Value = Max; + + fn variant() -> Vec<(&'static str, &'static str)> { + crate::variant_params![G] + } + + fn dims(&self) -> Vec { + vec![self.graph.num_vertices(); self.graph.num_vertices()] + } + + fn evaluate(&self, config: &[usize]) -> Max { + if config.len() != self.graph.num_vertices() { + return Max(None); + } + if self.graph.num_vertices() == 0 { + return Max(Some(0)); + } + if !self.is_proper_coloring(config) { + return Max(None); + } + if !self.is_complete_coloring(config) { + return Max(None); + } + let distinct_colors: HashSet = config.iter().copied().collect(); + Max(Some(distinct_colors.len())) + } +} + +crate::declare_variants! { + default MaximumAchromaticNumber => "num_vertices^num_vertices", +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_model_example_specs() -> Vec { + // C6: 6-cycle, achromatic number = 3 + // Coloring [0, 1, 2, 0, 1, 2] uses 3 colors and is both proper and complete. + vec![crate::example_db::specs::ModelExampleSpec { + id: "maximum_achromatic_number_simplegraph", + instance: Box::new(MaximumAchromaticNumber::new(SimpleGraph::new( + 6, + vec![(0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (5, 0)], + ))), + optimal_config: vec![0, 1, 2, 0, 1, 2], + optimal_value: serde_json::json!(3), + }] +} + +#[cfg(test)] +#[path = "../../unit_tests/models/graph/maximum_achromatic_number.rs"] +mod tests; diff --git a/src/models/graph/minimum_covering_by_cliques.rs b/src/models/graph/minimum_covering_by_cliques.rs new file mode 100644 index 00000000..bda7f3cd --- /dev/null +++ b/src/models/graph/minimum_covering_by_cliques.rs @@ -0,0 +1,195 @@ +//! Minimum Covering by Cliques problem implementation. +//! +//! Given a graph G = (V, E), find a minimum number of cliques whose union +//! covers every edge in E. + +use crate::registry::{FieldInfo, ProblemSchemaEntry, VariantDimension}; +use crate::topology::{Graph, SimpleGraph}; +use crate::traits::Problem; +use crate::types::Min; +use serde::{Deserialize, Serialize}; +use std::collections::HashSet; + +inventory::submit! { + ProblemSchemaEntry { + name: "MinimumCoveringByCliques", + display_name: "Minimum Covering by Cliques", + aliases: &[], + dimensions: &[ + VariantDimension::new("graph", "SimpleGraph", &["SimpleGraph"]), + ], + module_path: module_path!(), + description: "Find minimum number of cliques covering all edges", + fields: &[ + FieldInfo { name: "graph", type_name: "G", description: "The underlying graph G=(V,E)" }, + ], + } +} + +/// The Minimum Covering by Cliques problem. +/// +/// Given a graph G = (V, E), find a collection of cliques C_1, ..., C_k +/// in G such that every edge is contained in at least one clique, +/// and k is minimized. +/// +/// Variables: one per edge, each selecting which clique group covers it. +/// Each edge can be assigned to one of at most |E| groups (upper bound). +/// +/// # Type Parameters +/// +/// * `G` - The graph type (e.g., `SimpleGraph`) +/// +/// # Example +/// +/// ``` +/// use problemreductions::models::graph::MinimumCoveringByCliques; +/// use problemreductions::topology::SimpleGraph; +/// use problemreductions::{Problem, Solver, BruteForce}; +/// +/// // Triangle: 3 edges can be covered by 1 clique +/// let graph = SimpleGraph::new(3, vec![(0, 1), (1, 2), (0, 2)]); +/// let problem = MinimumCoveringByCliques::new(graph); +/// +/// let solver = BruteForce::new(); +/// let solution = solver.find_witness(&problem).unwrap(); +/// let value = problem.evaluate(&solution); +/// assert_eq!(value, problemreductions::types::Min(Some(1))); +/// ``` +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct MinimumCoveringByCliques { + /// The underlying graph. + graph: G, +} + +impl MinimumCoveringByCliques { + /// Create a MinimumCoveringByCliques problem from a graph. + pub fn new(graph: G) -> Self { + Self { graph } + } + + /// Get a reference to the underlying graph. + pub fn graph(&self) -> &G { + &self.graph + } + + /// Get the number of vertices in the underlying graph. + pub fn num_vertices(&self) -> usize { + self.graph.num_vertices() + } + + /// Get the number of edges in the underlying graph. + pub fn num_edges(&self) -> usize { + self.graph.num_edges() + } + + /// Check whether a configuration is a valid edge clique cover. + /// + /// For each group index used, the edges assigned to it must form a clique: + /// all vertices touched by those edges must be pairwise adjacent. + pub fn is_valid_cover(&self, config: &[usize]) -> bool { + let edges = self.graph.edges(); + let num_edges = edges.len(); + + if config.len() != num_edges { + return false; + } + + // Collect vertices per group and check clique property. + let max_group = match config.iter().max() { + Some(&m) => m, + None => return true, // no edges → trivially valid + }; + + for group in 0..=max_group { + let vertices: HashSet = config + .iter() + .enumerate() + .filter(|(_, &g)| g == group) + .flat_map(|(idx, _)| { + let (u, v) = edges[idx]; + [u, v] + }) + .collect(); + + let verts: Vec = vertices.into_iter().collect(); + for i in 0..verts.len() { + for j in (i + 1)..verts.len() { + if !self.graph.has_edge(verts[i], verts[j]) { + return false; + } + } + } + } + + true + } +} + +impl Problem for MinimumCoveringByCliques +where + G: Graph + crate::variant::VariantParam, +{ + const NAME: &'static str = "MinimumCoveringByCliques"; + type Value = Min; + + fn variant() -> Vec<(&'static str, &'static str)> { + crate::variant_params![G] + } + + fn dims(&self) -> Vec { + vec![self.graph.num_edges(); self.graph.num_edges()] + } + + fn evaluate(&self, config: &[usize]) -> Min { + if config.len() != self.graph.num_edges() { + return Min(None); + } + if self.graph.num_edges() == 0 { + return Min(Some(0)); + } + if !self.is_valid_cover(config) { + return Min(None); + } + let distinct_groups: HashSet = config.iter().copied().collect(); + Min(Some(distinct_groups.len())) + } +} + +crate::declare_variants! { + default MinimumCoveringByCliques => "2^num_edges", +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_model_example_specs() -> Vec { + // 6 vertices, 9 edges: + // (0,1),(1,2),(2,3),(3,0),(0,2),(4,0),(4,1),(5,2),(5,3) + // Optimal: 4 cliques + // edges 0,1,4 -> group 0 (clique {0,1,2}) + // edges 2,3 -> group 1 (clique {0,2,3}... wait, (2,3) and (3,0) -> vertices {0,2,3}) + // edges 5,6 -> group 2 (clique {0,1,4}) + // edges 7,8 -> group 3 (clique {2,3,5}) + // Config: [0, 0, 1, 1, 0, 2, 2, 3, 3] + vec![crate::example_db::specs::ModelExampleSpec { + id: "minimum_covering_by_cliques_simplegraph", + instance: Box::new(MinimumCoveringByCliques::new(SimpleGraph::new( + 6, + vec![ + (0, 1), + (1, 2), + (2, 3), + (3, 0), + (0, 2), + (4, 0), + (4, 1), + (5, 2), + (5, 3), + ], + ))), + optimal_config: vec![0, 0, 1, 1, 0, 2, 2, 3, 3], + optimal_value: serde_json::json!(4), + }] +} + +#[cfg(test)] +#[path = "../../unit_tests/models/graph/minimum_covering_by_cliques.rs"] +mod tests; diff --git a/src/models/graph/minimum_geometric_connected_dominating_set.rs b/src/models/graph/minimum_geometric_connected_dominating_set.rs new file mode 100644 index 00000000..b295af09 --- /dev/null +++ b/src/models/graph/minimum_geometric_connected_dominating_set.rs @@ -0,0 +1,207 @@ +//! Minimum Geometric Connected Dominating Set. +//! +//! Given a set of points P in the plane and a distance threshold B > 0, +//! find a minimum subset P' ⊆ P such that: +//! 1. Every point in P \ P' is within Euclidean distance B of some point in P' (domination). +//! 2. The subgraph induced on P' (edges between points within distance B) is connected. + +use crate::registry::{FieldInfo, ProblemSchemaEntry}; +use crate::traits::Problem; +use crate::types::Min; +use serde::{Deserialize, Serialize}; +use std::collections::VecDeque; + +inventory::submit! { + ProblemSchemaEntry { + name: "MinimumGeometricConnectedDominatingSet", + display_name: "Minimum Geometric Connected Dominating Set", + aliases: &[], + dimensions: &[], + module_path: module_path!(), + description: "Find minimum connected dominating set in a geometric point set", + fields: &[ + FieldInfo { + name: "points", + type_name: "Vec<(f64, f64)>", + description: "The set of points P in the plane", + }, + FieldInfo { + name: "radius", + type_name: "f64", + description: "The distance threshold B", + }, + ], + } +} + +/// Minimum Geometric Connected Dominating Set. +/// +/// Given points P in the plane and distance threshold B > 0, +/// find a minimum subset P' ⊆ P such that every point in P \ P' +/// is within distance B of some point in P', and the subgraph +/// induced on P' (edges between points within distance B) is connected. +/// +/// # Example +/// +/// ``` +/// use problemreductions::models::graph::MinimumGeometricConnectedDominatingSet; +/// use problemreductions::{Problem, Solver, BruteForce}; +/// +/// // Four collinear points with spacing 3 and radius 3.5: +/// // each point reaches its immediate neighbor but not two steps away. +/// let points = vec![(0.0, 0.0), (3.0, 0.0), (6.0, 0.0), (9.0, 0.0)]; +/// let problem = MinimumGeometricConnectedDominatingSet::new(points, 3.5); +/// +/// let solver = BruteForce::new(); +/// let witness = solver.find_witness(&problem).unwrap(); +/// let value = problem.evaluate(&witness).unwrap(); +/// assert_eq!(value, 2); // Two interior points dominate all and form a connected pair +/// ``` +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct MinimumGeometricConnectedDominatingSet { + /// The set of points in the plane. + points: Vec<(f64, f64)>, + /// The distance threshold B. + radius: f64, +} + +impl MinimumGeometricConnectedDominatingSet { + /// Create a new instance. + /// + /// # Panics + /// Panics if `radius <= 0.0` or if `points` is empty. + pub fn new(points: Vec<(f64, f64)>, radius: f64) -> Self { + assert!(radius > 0.0, "radius must be positive"); + assert!(!points.is_empty(), "points must be non-empty"); + Self { points, radius } + } + + /// Fallible constructor used by CLI validation and deserialization. + pub fn try_new(points: Vec<(f64, f64)>, radius: f64) -> Result { + if radius <= 0.0 { + return Err("radius must be positive".into()); + } + if points.is_empty() { + return Err("points must be non-empty".into()); + } + Ok(Self { points, radius }) + } + + /// Get the number of points. + pub fn num_points(&self) -> usize { + self.points.len() + } + + /// Get the distance threshold. + pub fn radius(&self) -> f64 { + self.radius + } + + /// Get a reference to the points. + pub fn points(&self) -> &[(f64, f64)] { + &self.points + } + + /// Squared Euclidean distance between two points. + fn dist_sq(a: (f64, f64), b: (f64, f64)) -> f64 { + let dx = a.0 - b.0; + let dy = a.1 - b.1; + dx * dx + dy * dy + } + + /// Check if two points are within distance B. + fn within_radius(&self, i: usize, j: usize) -> bool { + Self::dist_sq(self.points[i], self.points[j]) <= self.radius * self.radius + } + + /// Check if a configuration is a valid connected dominating set. + pub fn is_valid_solution(&self, config: &[usize]) -> bool { + let selected: Vec = config + .iter() + .enumerate() + .filter(|(_, &v)| v == 1) + .map(|(i, _)| i) + .collect(); + + if selected.is_empty() { + return false; + } + + // Check domination: every unselected point must be within distance B + // of some selected point. + for (i, &v) in config.iter().enumerate() { + if v == 0 && !selected.iter().any(|&s| self.within_radius(i, s)) { + return false; + } + } + + // Check connectivity: BFS on selected points using distance-B edges. + if selected.len() == 1 { + return true; + } + let mut visited = vec![false; selected.len()]; + let mut queue = VecDeque::new(); + visited[0] = true; + queue.push_back(0); + while let Some(u) = queue.pop_front() { + for (vi, &vj) in selected.iter().enumerate() { + if !visited[vi] && self.within_radius(selected[u], vj) { + visited[vi] = true; + queue.push_back(vi); + } + } + } + visited.iter().all(|&v| v) + } +} + +impl Problem for MinimumGeometricConnectedDominatingSet { + const NAME: &'static str = "MinimumGeometricConnectedDominatingSet"; + type Value = Min; + + fn variant() -> Vec<(&'static str, &'static str)> { + crate::variant_params![] + } + + fn dims(&self) -> Vec { + vec![2; self.num_points()] + } + + fn evaluate(&self, config: &[usize]) -> Min { + if !self.is_valid_solution(config) { + return Min(None); + } + let count = config.iter().filter(|&&v| v == 1).count(); + Min(Some(count)) + } +} + +crate::declare_variants! { + default MinimumGeometricConnectedDominatingSet => "2^num_points", +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_model_example_specs() -> Vec { + vec![crate::example_db::specs::ModelExampleSpec { + id: "minimum_geometric_connected_dominating_set", + instance: Box::new(MinimumGeometricConnectedDominatingSet::new( + vec![ + (0.0, 0.0), + (3.0, 0.0), + (6.0, 0.0), + (9.0, 0.0), + (0.0, 3.0), + (3.0, 3.0), + (6.0, 3.0), + (9.0, 3.0), + ], + 3.5, + )), + optimal_config: vec![1, 1, 1, 1, 0, 0, 0, 0], + optimal_value: serde_json::json!(4), + }] +} + +#[cfg(test)] +#[path = "../../unit_tests/models/graph/minimum_geometric_connected_dominating_set.rs"] +mod tests; diff --git a/src/models/graph/minimum_intersection_graph_basis.rs b/src/models/graph/minimum_intersection_graph_basis.rs new file mode 100644 index 00000000..ec1792a1 --- /dev/null +++ b/src/models/graph/minimum_intersection_graph_basis.rs @@ -0,0 +1,184 @@ +//! Minimum Intersection Graph Basis problem implementation. +//! +//! Given a graph G = (V, E), find a universe U of minimum cardinality such that +//! each vertex v can be assigned a subset S[v] ⊆ U with the intersection graph +//! of {S[v]} equal to G. + +use crate::registry::{FieldInfo, ProblemSchemaEntry, VariantDimension}; +use crate::topology::{Graph, SimpleGraph}; +use crate::traits::Problem; +use crate::types::Min; +use serde::{Deserialize, Serialize}; +use std::collections::HashSet; + +inventory::submit! { + ProblemSchemaEntry { + name: "MinimumIntersectionGraphBasis", + display_name: "Minimum Intersection Graph Basis", + aliases: &[], + dimensions: &[ + VariantDimension::new("graph", "SimpleGraph", &["SimpleGraph"]), + ], + module_path: module_path!(), + description: "Find minimum universe size for intersection graph representation", + fields: &[ + FieldInfo { name: "graph", type_name: "G", description: "The underlying graph G=(V,E)" }, + ], + } +} + +/// The Minimum Intersection Graph Basis problem. +/// +/// Given a graph G = (V, E), find a universe U of minimum cardinality and +/// an assignment of subsets S[v] ⊆ U for each vertex v ∈ V such that: +/// - For every edge (u, v) ∈ E: S[u] ∩ S[v] ≠ ∅ +/// - For every non-edge pair (u, v) ∉ E: S[u] ∩ S[v] = ∅ +/// - |U| is minimized +/// +/// The minimum |U| is the *intersection number* of G. +/// +/// Variables: n × |E| binary variables where n = |V| and |E| is the upper bound +/// on universe size. config[v * |E| + s] = 1 means element s ∈ S[v]. +/// +/// # Type Parameters +/// +/// * `G` - The graph type (e.g., `SimpleGraph`) +/// +/// # Example +/// +/// ``` +/// use problemreductions::models::graph::MinimumIntersectionGraphBasis; +/// use problemreductions::topology::SimpleGraph; +/// use problemreductions::{Problem, Solver, BruteForce}; +/// +/// // Path P3: 3 vertices, edges (0,1), (1,2) +/// let graph = SimpleGraph::new(3, vec![(0, 1), (1, 2)]); +/// let problem = MinimumIntersectionGraphBasis::new(graph); +/// +/// let solver = BruteForce::new(); +/// let solution = solver.find_witness(&problem).unwrap(); +/// let value = problem.evaluate(&solution); +/// assert_eq!(value, problemreductions::types::Min(Some(2))); +/// ``` +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct MinimumIntersectionGraphBasis { + /// The underlying graph. + graph: G, +} + +impl MinimumIntersectionGraphBasis { + /// Create a MinimumIntersectionGraphBasis problem from a graph. + pub fn new(graph: G) -> Self { + Self { graph } + } + + /// Get a reference to the underlying graph. + pub fn graph(&self) -> &G { + &self.graph + } + + /// Get the number of vertices in the underlying graph. + pub fn num_vertices(&self) -> usize { + self.graph.num_vertices() + } + + /// Get the number of edges in the underlying graph. + pub fn num_edges(&self) -> usize { + self.graph.num_edges() + } +} + +impl Problem for MinimumIntersectionGraphBasis +where + G: Graph + crate::variant::VariantParam, +{ + const NAME: &'static str = "MinimumIntersectionGraphBasis"; + type Value = Min; + + fn variant() -> Vec<(&'static str, &'static str)> { + crate::variant_params![G] + } + + fn dims(&self) -> Vec { + let n = self.graph.num_vertices(); + let m = self.graph.num_edges(); + if m == 0 { + // No edges: no variables needed; empty assignment is trivially valid. + return vec![]; + } + vec![2; n * m] + } + + fn evaluate(&self, config: &[usize]) -> Min { + let n = self.graph.num_vertices(); + let m = self.graph.num_edges(); + + if m == 0 { + // No edges: universe size 0 suffices (all subsets empty, no + // adjacency constraints). But we must also check that no two + // vertices are adjacent — which is guaranteed when m == 0. + if config.is_empty() { + return Min(Some(0)); + } else { + return Min(None); + } + } + + if config.len() != n * m { + return Min(None); + } + + // Parse subsets: S[v] = set of elements s where config[v * m + s] == 1 + let subsets: Vec> = (0..n) + .map(|v| (0..m).filter(|&s| config[v * m + s] == 1).collect()) + .collect(); + + // Check edge constraints: for every edge (u, v), S[u] ∩ S[v] ≠ ∅ + let edges = self.graph.edges(); + for &(u, v) in &edges { + if subsets[u].intersection(&subsets[v]).next().is_none() { + return Min(None); + } + } + + // Check non-edge constraints: for every non-edge pair (u, v), S[u] ∩ S[v] = ∅ + for u in 0..n { + for v in (u + 1)..n { + if !self.graph.has_edge(u, v) + && subsets[u].intersection(&subsets[v]).next().is_some() + { + return Min(None); + } + } + } + + // Count elements used (union of all subsets) + let used: HashSet = subsets.iter().flat_map(|s| s.iter().copied()).collect(); + Min(Some(used.len())) + } +} + +crate::declare_variants! { + default MinimumIntersectionGraphBasis => "num_edges^num_edges", +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_model_example_specs() -> Vec { + // P3: 3 vertices, edges (0,1), (1,2), num_edges=2 + // Intersection number = 2: S[0]={0}, S[1]={0,1}, S[2]={1} + // Config: vertex 0: [1,0], vertex 1: [1,1], vertex 2: [0,1] + // Full config: [1,0, 1,1, 0,1] + vec![crate::example_db::specs::ModelExampleSpec { + id: "minimum_intersection_graph_basis_simplegraph", + instance: Box::new(MinimumIntersectionGraphBasis::new(SimpleGraph::new( + 3, + vec![(0, 1), (1, 2)], + ))), + optimal_config: vec![1, 0, 1, 1, 0, 1], + optimal_value: serde_json::json!(2), + }] +} + +#[cfg(test)] +#[path = "../../unit_tests/models/graph/minimum_intersection_graph_basis.rs"] +mod tests; diff --git a/src/models/graph/mod.rs b/src/models/graph/mod.rs index 1fbec621..dcc6b2a8 100644 --- a/src/models/graph/mod.rs +++ b/src/models/graph/mod.rs @@ -2,18 +2,26 @@ //! //! Problems whose input is a graph (optionally weighted): //! - [`AcyclicPartition`]: Partition a digraph into bounded-weight groups with an acyclic quotient graph +//! - [`BoundedDiameterSpanningTree`]: Spanning tree with bounded weight and diameter +//! - [`DegreeConstrainedSpanningTree`]: Spanning tree with maximum vertex degree at most K //! - [`DirectedHamiltonianPath`]: Directed Hamiltonian path (decision problem) //! - [`MaximumIndependentSet`]: Maximum weight independent set //! - [`MaximalIS`]: Maximal independent set //! - [`MinimumVertexCover`]: Minimum weight vertex cover +//! - [`MinimumCoveringByCliques`]: Minimum number of cliques covering all edges +//! - [`MonochromaticTriangle`]: 2-color edges so that no triangle is monochromatic +//! - [`MinimumIntersectionGraphBasis`]: Minimum universe size for intersection graph representation //! - [`MinimumDominatingSet`]: Minimum dominating set +//! - [`MinimumGeometricConnectedDominatingSet`]: Minimum connected dominating set in a geometric point set //! - [`MinimumFeedbackVertexSet`]: Minimum weight feedback vertex set in a directed graph //! - [`MaximumClique`]: Maximum weight clique +//! - [`MaximumAchromaticNumber`]: Maximum number of colors in a complete proper coloring //! - [`MaxCut`]: Maximum cut on weighted graphs //! - [`MinimumCutIntoBoundedSets`]: Minimum cut into bounded sets (Garey & Johnson ND17) //! - [`MinimumDummyActivitiesPert`]: Minimum dummy activities in activity-on-arc PERT networks //! - [`HamiltonianCircuit`]: Hamiltonian circuit (decision problem) //! - [`IsomorphicSpanningTree`]: Isomorphic spanning tree (satisfaction) +//! - [`Kernel`]: Kernel of a directed graph (independent and absorbing vertex subset) //! - [`KClique`]: Clique decision problem with threshold k //! - [`KthBestSpanningTree`]: K distinct bounded spanning trees (satisfaction) //! - [`KColoring`]: K-vertex coloring @@ -27,7 +35,9 @@ //! - [`HamiltonianPathBetweenTwoVertices`]: Hamiltonian path between two specified vertices (decision problem) //! - [`LongestPath`]: Maximum-length simple s-t path //! - [`ShortestWeightConstrainedPath`]: Bicriteria simple s-t path with length and weight bounds +//! - [`PartitionIntoCliques`]: Partition vertices into K groups each inducing a clique //! - [`PartitionIntoForests`]: Partition vertices into K classes each inducing an acyclic subgraph +//! - [`PartitionIntoPerfectMatchings`]: Partition vertices into K groups each inducing a perfect matching //! - [`PartitionIntoPathsOfLength2`]: Partition vertices into triples with at least two edges each //! - [`BicliqueCover`]: Biclique cover on bipartite graphs //! - [`SteinerTreeInGraphs`]: Minimum weight Steiner tree connecting terminal vertices @@ -64,6 +74,8 @@ pub(crate) mod biclique_cover; pub(crate) mod biconnectivity_augmentation; pub(crate) mod bottleneck_traveling_salesman; pub(crate) mod bounded_component_spanning_forest; +pub(crate) mod bounded_diameter_spanning_tree; +pub(crate) mod degree_constrained_spanning_tree; pub(crate) mod directed_hamiltonian_path; pub(crate) mod directed_two_commodity_integral_flow; pub(crate) mod disjoint_connecting_paths; @@ -77,32 +89,40 @@ pub(crate) mod integral_flow_with_multipliers; pub(crate) mod isomorphic_spanning_tree; pub(crate) mod kclique; pub(crate) mod kcoloring; +pub(crate) mod kernel; pub(crate) mod kth_best_spanning_tree; pub(crate) mod length_bounded_disjoint_paths; pub(crate) mod longest_circuit; pub(crate) mod longest_path; pub(crate) mod max_cut; pub(crate) mod maximal_is; +pub(crate) mod maximum_achromatic_number; pub(crate) mod maximum_clique; pub(crate) mod maximum_independent_set; pub(crate) mod maximum_matching; pub(crate) mod min_max_multicenter; +pub(crate) mod minimum_covering_by_cliques; pub(crate) mod minimum_cut_into_bounded_sets; pub(crate) mod minimum_dominating_set; pub(crate) mod minimum_dummy_activities_pert; pub(crate) mod minimum_feedback_arc_set; pub(crate) mod minimum_feedback_vertex_set; +pub(crate) mod minimum_geometric_connected_dominating_set; +pub(crate) mod minimum_intersection_graph_basis; pub(crate) mod minimum_maximal_matching; pub(crate) mod minimum_multiway_cut; pub(crate) mod minimum_sum_multicenter; pub(crate) mod minimum_vertex_cover; pub(crate) mod mixed_chinese_postman; +pub(crate) mod monochromatic_triangle; pub(crate) mod multiple_choice_branching; pub(crate) mod multiple_copy_file_allocation; pub(crate) mod optimal_linear_arrangement; pub(crate) mod partial_feedback_edge_set; +pub(crate) mod partition_into_cliques; pub(crate) mod partition_into_forests; pub(crate) mod partition_into_paths_of_length_2; +pub(crate) mod partition_into_perfect_matchings; pub(crate) mod partition_into_triangles; pub(crate) mod path_constrained_network_flow; pub(crate) mod rooted_tree_arrangement; @@ -123,6 +143,8 @@ pub use biclique_cover::BicliqueCover; pub use biconnectivity_augmentation::BiconnectivityAugmentation; pub use bottleneck_traveling_salesman::BottleneckTravelingSalesman; pub use bounded_component_spanning_forest::BoundedComponentSpanningForest; +pub use bounded_diameter_spanning_tree::BoundedDiameterSpanningTree; +pub use degree_constrained_spanning_tree::DegreeConstrainedSpanningTree; pub use directed_hamiltonian_path::DirectedHamiltonianPath; pub use directed_two_commodity_integral_flow::DirectedTwoCommodityIntegralFlow; pub use disjoint_connecting_paths::DisjointConnectingPaths; @@ -136,32 +158,40 @@ pub use integral_flow_with_multipliers::IntegralFlowWithMultipliers; pub use isomorphic_spanning_tree::IsomorphicSpanningTree; pub use kclique::KClique; pub use kcoloring::KColoring; +pub use kernel::Kernel; pub use kth_best_spanning_tree::KthBestSpanningTree; pub use length_bounded_disjoint_paths::LengthBoundedDisjointPaths; pub use longest_circuit::LongestCircuit; pub use longest_path::LongestPath; pub use max_cut::MaxCut; pub use maximal_is::MaximalIS; +pub use maximum_achromatic_number::MaximumAchromaticNumber; pub use maximum_clique::MaximumClique; pub use maximum_independent_set::MaximumIndependentSet; pub use maximum_matching::MaximumMatching; pub use min_max_multicenter::MinMaxMulticenter; +pub use minimum_covering_by_cliques::MinimumCoveringByCliques; pub use minimum_cut_into_bounded_sets::MinimumCutIntoBoundedSets; pub use minimum_dominating_set::MinimumDominatingSet; pub use minimum_dummy_activities_pert::MinimumDummyActivitiesPert; pub use minimum_feedback_arc_set::MinimumFeedbackArcSet; pub use minimum_feedback_vertex_set::MinimumFeedbackVertexSet; +pub use minimum_geometric_connected_dominating_set::MinimumGeometricConnectedDominatingSet; +pub use minimum_intersection_graph_basis::MinimumIntersectionGraphBasis; pub use minimum_maximal_matching::MinimumMaximalMatching; pub use minimum_multiway_cut::MinimumMultiwayCut; pub use minimum_sum_multicenter::MinimumSumMulticenter; pub use minimum_vertex_cover::MinimumVertexCover; pub use mixed_chinese_postman::MixedChinesePostman; +pub use monochromatic_triangle::MonochromaticTriangle; pub use multiple_choice_branching::MultipleChoiceBranching; pub use multiple_copy_file_allocation::MultipleCopyFileAllocation; pub use optimal_linear_arrangement::OptimalLinearArrangement; pub use partial_feedback_edge_set::PartialFeedbackEdgeSet; +pub use partition_into_cliques::PartitionIntoCliques; pub use partition_into_forests::PartitionIntoForests; pub use partition_into_paths_of_length_2::PartitionIntoPathsOfLength2; +pub use partition_into_perfect_matchings::PartitionIntoPerfectMatchings; pub use partition_into_triangles::PartitionIntoTriangles; pub use path_constrained_network_flow::PathConstrainedNetworkFlow; pub use rooted_tree_arrangement::RootedTreeArrangement; @@ -180,6 +210,8 @@ pub use undirected_two_commodity_integral_flow::UndirectedTwoCommodityIntegralFl pub(crate) fn canonical_model_example_specs() -> Vec { let mut specs = Vec::new(); specs.extend(acyclic_partition::canonical_model_example_specs()); + specs.extend(bounded_diameter_spanning_tree::canonical_model_example_specs()); + specs.extend(degree_constrained_spanning_tree::canonical_model_example_specs()); specs.extend(directed_hamiltonian_path::canonical_model_example_specs()); specs.extend(maximum_independent_set::canonical_model_example_specs()); specs.extend(minimum_vertex_cover::canonical_model_example_specs()); @@ -192,15 +224,21 @@ pub(crate) fn canonical_model_example_specs() -> Vec Vec { + /// The underlying graph. + graph: G, + /// Precomputed list of triangles, each stored as three edge indices. + triangles: Vec<[usize; 3]>, + /// Ordered edge list (mirrors `graph.edges()` order). + edge_list: Vec<(usize, usize)>, +} + +impl MonochromaticTriangle { + /// Create a new Monochromatic Triangle instance. + pub fn new(graph: G) -> Self { + let edge_list = graph.edges(); + // Build edge-to-index mapping: (min(u,v), max(u,v)) -> index + let mut edge_index: HashMap<(usize, usize), usize> = HashMap::new(); + for (idx, &(u, v)) in edge_list.iter().enumerate() { + let key = if u < v { (u, v) } else { (v, u) }; + edge_index.insert(key, idx); + } + + // Find all triangles: for each triple (u, v, w) with u < v < w, + // check if all three edges exist. + let n = graph.num_vertices(); + let mut triangles = Vec::new(); + for u in 0..n { + for v in (u + 1)..n { + if !graph.has_edge(u, v) { + continue; + } + for w in (v + 1)..n { + if graph.has_edge(u, w) && graph.has_edge(v, w) { + let e_uv = edge_index[&(u, v)]; + let e_uw = edge_index[&(u, w)]; + let e_vw = edge_index[&(v, w)]; + triangles.push([e_uv, e_uw, e_vw]); + } + } + } + } + + Self { + graph, + triangles, + edge_list, + } + } + + /// Get a reference to the underlying graph. + pub fn graph(&self) -> &G { + &self.graph + } + + /// Get the number of vertices in the underlying graph. + pub fn num_vertices(&self) -> usize { + self.graph.num_vertices() + } + + /// Get the number of edges in the underlying graph. + pub fn num_edges(&self) -> usize { + self.graph.num_edges() + } + + /// Get the precomputed list of triangles (as edge-index triples). + pub fn triangles(&self) -> &[[usize; 3]] { + &self.triangles + } + + /// Get the ordered edge list. + pub fn edge_list(&self) -> &[(usize, usize)] { + &self.edge_list + } +} + +impl Problem for MonochromaticTriangle +where + G: Graph + VariantParam, +{ + const NAME: &'static str = "MonochromaticTriangle"; + type Value = crate::types::Or; + + fn variant() -> Vec<(&'static str, &'static str)> { + crate::variant_params![G] + } + + fn dims(&self) -> Vec { + vec![2; self.edge_list.len()] + } + + fn evaluate(&self, config: &[usize]) -> crate::types::Or { + crate::types::Or({ + if config.len() != self.edge_list.len() { + return crate::types::Or(false); + } + + // Check each triangle: if all three edges have the same color, + // the coloring is invalid. + for tri in &self.triangles { + let c0 = config[tri[0]]; + let c1 = config[tri[1]]; + let c2 = config[tri[2]]; + if c0 == c1 && c1 == c2 { + return crate::types::Or(false); + } + } + + true + }) + } +} + +crate::declare_variants! { + default MonochromaticTriangle => "2^num_edges", +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_model_example_specs() -> Vec { + // K4: 4 vertices, 6 edges, has a valid 2-coloring avoiding monochromatic triangles. + // Edges in order: (0,1),(0,2),(0,3),(1,2),(1,3),(2,3) + // Config [0,0,1,1,0,1]: + // Triangle (0,1,2): edges 0,1,3 -> colors 0,0,1 -> not monochromatic + // Triangle (0,1,3): edges 0,2,4 -> colors 0,1,0 -> not monochromatic + // Triangle (0,2,3): edges 1,2,5 -> colors 0,1,1 -> not monochromatic + // Triangle (1,2,3): edges 3,4,5 -> colors 1,0,1 -> not monochromatic + vec![crate::example_db::specs::ModelExampleSpec { + id: "monochromatic_triangle_simplegraph", + instance: Box::new(MonochromaticTriangle::new(SimpleGraph::new( + 4, + vec![(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)], + ))), + optimal_config: vec![0, 0, 1, 1, 0, 1], + optimal_value: serde_json::json!(true), + }] +} + +#[cfg(test)] +#[path = "../../unit_tests/models/graph/monochromatic_triangle.rs"] +mod tests; diff --git a/src/models/graph/partition_into_cliques.rs b/src/models/graph/partition_into_cliques.rs new file mode 100644 index 00000000..4189399b --- /dev/null +++ b/src/models/graph/partition_into_cliques.rs @@ -0,0 +1,182 @@ +//! Partition Into Cliques problem implementation. +//! +//! Given a graph G = (V, E) and a positive integer K <= |V|, determine whether +//! the vertex set can be partitioned into k <= K groups such that the subgraph +//! induced by each group is a complete subgraph (clique). + +use crate::registry::{FieldInfo, ProblemSchemaEntry, VariantDimension}; +use crate::topology::{Graph, SimpleGraph}; +use crate::traits::Problem; +use crate::variant::VariantParam; +use serde::{Deserialize, Serialize}; + +inventory::submit! { + ProblemSchemaEntry { + name: "PartitionIntoCliques", + display_name: "Partition into Cliques", + aliases: &[], + dimensions: &[ + VariantDimension::new("graph", "SimpleGraph", &["SimpleGraph"]), + ], + module_path: module_path!(), + description: "Partition vertices into K groups each inducing a clique", + fields: &[ + FieldInfo { name: "graph", type_name: "G", description: "The underlying graph G=(V,E)" }, + FieldInfo { name: "num_cliques", type_name: "usize", description: "num_cliques: maximum number of clique groups K (>= 1)" }, + ], + } +} + +/// The Partition Into Cliques problem. +/// +/// Given a graph G = (V, E) and a positive integer K <= |V|, determine whether +/// the vertices can be partitioned into k <= K groups V_1, ..., V_k such that +/// the subgraph induced by each V_i is a complete subgraph (clique). +/// +/// # Type Parameters +/// +/// * `G` - Graph type (e.g., SimpleGraph) +/// +/// # Example +/// +/// ``` +/// use problemreductions::models::graph::PartitionIntoCliques; +/// use problemreductions::topology::SimpleGraph; +/// use problemreductions::{Problem, Solver, BruteForce}; +/// +/// // Two triangles: 0-1-2-0 and 3-4-5-3 +/// let graph = SimpleGraph::new(6, vec![(0,1),(0,2),(1,2),(3,4),(3,5),(4,5)]); +/// let problem = PartitionIntoCliques::new(graph, 3); +/// +/// let solver = BruteForce::new(); +/// let solution = solver.find_witness(&problem); +/// assert!(solution.is_some()); +/// ``` +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(bound(deserialize = "G: serde::Deserialize<'de>"))] +pub struct PartitionIntoCliques { + /// The underlying graph. + graph: G, + /// Maximum number of clique groups. + num_cliques: usize, +} + +impl PartitionIntoCliques { + /// Create a new Partition Into Cliques instance. + /// + /// # Panics + /// Panics if `num_cliques` is zero or greater than `graph.num_vertices()`. + pub fn new(graph: G, num_cliques: usize) -> Self { + assert!(num_cliques >= 1, "num_cliques must be at least 1"); + assert!( + num_cliques <= graph.num_vertices(), + "num_cliques must be at most num_vertices" + ); + Self { graph, num_cliques } + } + + /// Get a reference to the underlying graph. + pub fn graph(&self) -> &G { + &self.graph + } + + /// Get the maximum number of clique groups. + pub fn num_cliques(&self) -> usize { + self.num_cliques + } + + /// Get the number of vertices in the underlying graph. + pub fn num_vertices(&self) -> usize { + self.graph.num_vertices() + } + + /// Get the number of edges in the underlying graph. + pub fn num_edges(&self) -> usize { + self.graph.num_edges() + } +} + +impl Problem for PartitionIntoCliques +where + G: Graph + VariantParam, +{ + const NAME: &'static str = "PartitionIntoCliques"; + type Value = crate::types::Or; + + fn variant() -> Vec<(&'static str, &'static str)> { + crate::variant_params![G] + } + + fn dims(&self) -> Vec { + vec![self.num_cliques; self.graph.num_vertices()] + } + + fn evaluate(&self, config: &[usize]) -> crate::types::Or { + crate::types::Or(is_valid_clique_partition( + &self.graph, + self.num_cliques, + config, + )) + } +} + +/// Check whether `config` is a valid K-clique partition of `graph`. +fn is_valid_clique_partition(graph: &G, num_cliques: usize, config: &[usize]) -> bool { + let n = graph.num_vertices(); + + // Basic validity checks + if config.len() != n { + return false; + } + if config.iter().any(|&c| c >= num_cliques) { + return false; + } + + // For each group, collect the vertices and check all pairs are adjacent. + for group in 0..num_cliques { + let members: Vec = (0..n).filter(|&v| config[v] == group).collect(); + for i in 0..members.len() { + for j in (i + 1)..members.len() { + if !graph.has_edge(members[i], members[j]) { + return false; + } + } + } + } + + true +} + +crate::declare_variants! { + default PartitionIntoCliques => "2^num_vertices", +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_model_example_specs() -> Vec { + vec![crate::example_db::specs::ModelExampleSpec { + id: "partition_into_cliques_simplegraph", + instance: Box::new(PartitionIntoCliques::new( + SimpleGraph::new( + 6, + vec![ + (0, 1), + (0, 2), + (1, 2), + (3, 4), + (3, 5), + (4, 5), + (0, 3), + (1, 4), + (2, 5), + ], + ), + 3, + )), + optimal_config: vec![0, 0, 0, 1, 1, 1], + optimal_value: serde_json::json!(true), + }] +} + +#[cfg(test)] +#[path = "../../unit_tests/models/graph/partition_into_cliques.rs"] +mod tests; diff --git a/src/models/graph/partition_into_perfect_matchings.rs b/src/models/graph/partition_into_perfect_matchings.rs new file mode 100644 index 00000000..89fcef0b --- /dev/null +++ b/src/models/graph/partition_into_perfect_matchings.rs @@ -0,0 +1,190 @@ +//! Partition Into Perfect Matchings problem implementation. +//! +//! Given a graph G = (V, E) and a positive integer K <= |V|, determine whether +//! the vertex set can be partitioned into k <= K groups such that the subgraph +//! induced by each group is a perfect matching (every vertex in the group has +//! exactly one neighbor within the group). + +use crate::registry::{FieldInfo, ProblemSchemaEntry, VariantDimension}; +use crate::topology::{Graph, SimpleGraph}; +use crate::traits::Problem; +use crate::variant::VariantParam; +use serde::{Deserialize, Serialize}; + +inventory::submit! { + ProblemSchemaEntry { + name: "PartitionIntoPerfectMatchings", + display_name: "Partition into Perfect Matchings", + aliases: &[], + dimensions: &[ + VariantDimension::new("graph", "SimpleGraph", &["SimpleGraph"]), + ], + module_path: module_path!(), + description: "Partition vertices into K groups each inducing a perfect matching", + fields: &[ + FieldInfo { name: "graph", type_name: "G", description: "The underlying graph G=(V,E)" }, + FieldInfo { name: "num_matchings", type_name: "usize", description: "num_matchings: maximum number of matching groups K (>= 1)" }, + ], + } +} + +/// The Partition Into Perfect Matchings problem. +/// +/// Given a graph G = (V, E) and a positive integer K <= |V|, determine whether +/// the vertices can be partitioned into k <= K groups V_1, ..., V_k such that +/// the subgraph induced by each V_i is a perfect matching: every vertex in V_i +/// has exactly one neighbor also in V_i. +/// +/// # Type Parameters +/// +/// * `G` - Graph type (e.g., SimpleGraph) +/// +/// # Example +/// +/// ``` +/// use problemreductions::models::graph::PartitionIntoPerfectMatchings; +/// use problemreductions::topology::SimpleGraph; +/// use problemreductions::{Problem, Solver, BruteForce}; +/// +/// // 4 vertices with edges: (0,1),(2,3),(0,2),(1,3) +/// let graph = SimpleGraph::new(4, vec![(0,1),(2,3),(0,2),(1,3)]); +/// let problem = PartitionIntoPerfectMatchings::new(graph, 2); +/// +/// let solver = BruteForce::new(); +/// let solution = solver.find_witness(&problem); +/// assert!(solution.is_some()); +/// ``` +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(bound(deserialize = "G: serde::Deserialize<'de>"))] +pub struct PartitionIntoPerfectMatchings { + /// The underlying graph. + graph: G, + /// Maximum number of matching groups. + num_matchings: usize, +} + +impl PartitionIntoPerfectMatchings { + /// Create a new Partition Into Perfect Matchings instance. + /// + /// # Panics + /// Panics if `num_matchings` is zero or greater than `graph.num_vertices()`. + pub fn new(graph: G, num_matchings: usize) -> Self { + assert!(num_matchings >= 1, "num_matchings must be at least 1"); + assert!( + num_matchings <= graph.num_vertices(), + "num_matchings must be at most num_vertices" + ); + Self { + graph, + num_matchings, + } + } + + /// Get a reference to the underlying graph. + pub fn graph(&self) -> &G { + &self.graph + } + + /// Get the maximum number of matching groups. + pub fn num_matchings(&self) -> usize { + self.num_matchings + } + + /// Get the number of vertices in the underlying graph. + pub fn num_vertices(&self) -> usize { + self.graph.num_vertices() + } + + /// Get the number of edges in the underlying graph. + pub fn num_edges(&self) -> usize { + self.graph.num_edges() + } +} + +impl Problem for PartitionIntoPerfectMatchings +where + G: Graph + VariantParam, +{ + const NAME: &'static str = "PartitionIntoPerfectMatchings"; + type Value = crate::types::Or; + + fn variant() -> Vec<(&'static str, &'static str)> { + crate::variant_params![G] + } + + fn dims(&self) -> Vec { + vec![self.num_matchings; self.graph.num_vertices()] + } + + fn evaluate(&self, config: &[usize]) -> crate::types::Or { + crate::types::Or(is_valid_perfect_matching_partition( + &self.graph, + self.num_matchings, + config, + )) + } +} + +/// Check whether `config` is a valid K-perfect-matching partition of `graph`. +fn is_valid_perfect_matching_partition( + graph: &G, + num_matchings: usize, + config: &[usize], +) -> bool { + let n = graph.num_vertices(); + + // Basic validity checks + if config.len() != n { + return false; + } + if config.iter().any(|&c| c >= num_matchings) { + return false; + } + + // For each group, collect the vertices and check every vertex has exactly + // one neighbor within the group (i.e., the induced subgraph is a perfect matching). + for group in 0..num_matchings { + let members: Vec = (0..n).filter(|&v| config[v] == group).collect(); + // Empty groups are OK + if members.is_empty() { + continue; + } + // A perfect matching requires an even number of vertices + if !members.len().is_multiple_of(2) { + return false; + } + // Each member must have exactly one neighbor in the group + for &v in &members { + let neighbor_count = members + .iter() + .filter(|&&u| u != v && graph.has_edge(v, u)) + .count(); + if neighbor_count != 1 { + return false; + } + } + } + + true +} + +crate::declare_variants! { + default PartitionIntoPerfectMatchings => "num_matchings^num_vertices", +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_model_example_specs() -> Vec { + vec![crate::example_db::specs::ModelExampleSpec { + id: "partition_into_perfect_matchings_simplegraph", + instance: Box::new(PartitionIntoPerfectMatchings::new( + SimpleGraph::new(4, vec![(0, 1), (2, 3), (0, 2), (1, 3)]), + 2, + )), + optimal_config: vec![0, 0, 1, 1], + optimal_value: serde_json::json!(true), + }] +} + +#[cfg(test)] +#[path = "../../unit_tests/models/graph/partition_into_perfect_matchings.rs"] +mod tests; diff --git a/src/models/misc/betweenness.rs b/src/models/misc/betweenness.rs new file mode 100644 index 00000000..2062f6af --- /dev/null +++ b/src/models/misc/betweenness.rs @@ -0,0 +1,186 @@ +//! Betweenness problem implementation. +//! +//! Given a finite set A and a collection C of ordered triples (a, b, c), +//! determine whether there exists a linear ordering f: A → {1, ..., |A|} +//! such that for each (a, b, c) ∈ C, either f(a) < f(b) < f(c) or +//! f(c) < f(b) < f(a) (i.e., b is between a and c). + +use crate::registry::{FieldInfo, ProblemSchemaEntry, ProblemSizeFieldEntry}; +use crate::traits::Problem; +use crate::types::Or; +use serde::de::Error as _; +use serde::{Deserialize, Deserializer, Serialize}; + +inventory::submit! { + ProblemSchemaEntry { + name: "Betweenness", + display_name: "Betweenness", + aliases: &[], + dimensions: &[], + module_path: module_path!(), + description: "Find a linear ordering where specified elements are between others", + fields: &[ + FieldInfo { name: "num_elements", type_name: "usize", description: "Number of elements in the set A" }, + FieldInfo { name: "triples", type_name: "Vec<(usize, usize, usize)>", description: "Collection of ordered triples (a, b, c) requiring b between a and c" }, + ], + } +} + +inventory::submit! { + ProblemSizeFieldEntry { + name: "Betweenness", + fields: &["num_elements", "num_triples"], + } +} + +#[derive(Debug, Clone, Serialize)] +pub struct Betweenness { + num_elements: usize, + triples: Vec<(usize, usize, usize)>, +} + +impl Betweenness { + fn validate_inputs( + num_elements: usize, + triples: &[(usize, usize, usize)], + ) -> Result<(), String> { + if num_elements == 0 { + return Err("Betweenness requires at least one element".to_string()); + } + for (i, &(a, b, c)) in triples.iter().enumerate() { + if a >= num_elements || b >= num_elements || c >= num_elements { + return Err(format!( + "Triple {} has element(s) out of range 0..{}", + i, num_elements + )); + } + if a == b || b == c || a == c { + return Err(format!( + "Triple {} has duplicate elements ({}, {}, {})", + i, a, b, c + )); + } + } + Ok(()) + } + + pub fn try_new( + num_elements: usize, + triples: Vec<(usize, usize, usize)>, + ) -> Result { + Self::validate_inputs(num_elements, &triples)?; + Ok(Self { + num_elements, + triples, + }) + } + + /// Create a new Betweenness instance. + /// + /// # Panics + /// + /// Panics if any triple element is out of range or if any triple has duplicate elements. + pub fn new(num_elements: usize, triples: Vec<(usize, usize, usize)>) -> Self { + Self::try_new(num_elements, triples).unwrap_or_else(|message| panic!("{message}")) + } + + /// Number of elements in the set A. + pub fn num_elements(&self) -> usize { + self.num_elements + } + + /// Number of betweenness triples. + pub fn num_triples(&self) -> usize { + self.triples.len() + } + + /// The collection of ordered triples. + pub fn triples(&self) -> &[(usize, usize, usize)] { + &self.triples + } + + /// Check whether a configuration represents a valid permutation and + /// satisfies all betweenness constraints. + fn is_valid_solution(&self, config: &[usize]) -> bool { + if config.len() != self.num_elements { + return false; + } + + // Check that config is a valid permutation of 0..n + let n = self.num_elements; + let mut seen = vec![false; n]; + for &pos in config { + if pos >= n || seen[pos] { + return false; + } + seen[pos] = true; + } + + // Check betweenness constraints: for each (a, b, c), + // config[a] < config[b] < config[c] OR config[c] < config[b] < config[a] + for &(a, b, c) in &self.triples { + let fa = config[a]; + let fb = config[b]; + let fc = config[c]; + if !((fa < fb && fb < fc) || (fc < fb && fb < fa)) { + return false; + } + } + + true + } +} + +#[derive(Deserialize)] +struct BetweennessData { + num_elements: usize, + triples: Vec<(usize, usize, usize)>, +} + +impl<'de> Deserialize<'de> for Betweenness { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let data = BetweennessData::deserialize(deserializer)?; + Self::try_new(data.num_elements, data.triples).map_err(D::Error::custom) + } +} + +impl Problem for Betweenness { + const NAME: &'static str = "Betweenness"; + type Value = Or; + + fn variant() -> Vec<(&'static str, &'static str)> { + crate::variant_params![] + } + + fn dims(&self) -> Vec { + vec![self.num_elements; self.num_elements] + } + + fn evaluate(&self, config: &[usize]) -> Or { + Or(self.is_valid_solution(config)) + } +} + +crate::declare_variants! { + default Betweenness => "2^num_elements", +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_model_example_specs() -> Vec { + vec![crate::example_db::specs::ModelExampleSpec { + id: "betweenness", + instance: Box::new(Betweenness::new( + 5, + vec![(0, 1, 2), (2, 3, 4), (0, 2, 4), (1, 3, 4)], + )), + optimal_config: vec![0, 1, 2, 3, 4], + optimal_value: serde_json::json!(true), + }] +} + +#[cfg(test)] +#[path = "../../unit_tests/models/misc/betweenness.rs"] +mod tests; diff --git a/src/models/misc/cyclic_ordering.rs b/src/models/misc/cyclic_ordering.rs new file mode 100644 index 00000000..9087fe49 --- /dev/null +++ b/src/models/misc/cyclic_ordering.rs @@ -0,0 +1,191 @@ +//! Cyclic Ordering problem implementation. +//! +//! Given a finite set A and a collection C of ordered triples (a, b, c), +//! determine whether there exists a permutation f: A → {0, ..., |A|-1} +//! such that for each (a, b, c) ∈ C, the values f(a), f(b), f(c) appear +//! in cyclic order — i.e., (f(a) < f(b) < f(c)) ∨ (f(b) < f(c) < f(a)) +//! ∨ (f(c) < f(a) < f(b)). + +use crate::registry::{FieldInfo, ProblemSchemaEntry, ProblemSizeFieldEntry}; +use crate::traits::Problem; +use crate::types::Or; +use serde::de::Error as _; +use serde::{Deserialize, Deserializer, Serialize}; + +inventory::submit! { + ProblemSchemaEntry { + name: "CyclicOrdering", + display_name: "Cyclic Ordering", + aliases: &[], + dimensions: &[], + module_path: module_path!(), + description: "Find a permutation satisfying cyclic ordering constraints on triples", + fields: &[ + FieldInfo { name: "num_elements", type_name: "usize", description: "Number of elements in the set A" }, + FieldInfo { name: "triples", type_name: "Vec<(usize, usize, usize)>", description: "Collection of ordered triples (a, b, c) requiring cyclic order" }, + ], + } +} + +inventory::submit! { + ProblemSizeFieldEntry { + name: "CyclicOrdering", + fields: &["num_elements", "num_triples"], + } +} + +#[derive(Debug, Clone, Serialize)] +pub struct CyclicOrdering { + num_elements: usize, + triples: Vec<(usize, usize, usize)>, +} + +impl CyclicOrdering { + fn validate_inputs( + num_elements: usize, + triples: &[(usize, usize, usize)], + ) -> Result<(), String> { + if num_elements == 0 { + return Err("CyclicOrdering requires at least one element".to_string()); + } + for (i, &(a, b, c)) in triples.iter().enumerate() { + if a >= num_elements || b >= num_elements || c >= num_elements { + return Err(format!( + "Triple {} has element(s) out of range 0..{}", + i, num_elements + )); + } + if a == b || b == c || a == c { + return Err(format!( + "Triple {} has duplicate elements ({}, {}, {})", + i, a, b, c + )); + } + } + Ok(()) + } + + pub fn try_new( + num_elements: usize, + triples: Vec<(usize, usize, usize)>, + ) -> Result { + Self::validate_inputs(num_elements, &triples)?; + Ok(Self { + num_elements, + triples, + }) + } + + /// Create a new CyclicOrdering instance. + /// + /// # Panics + /// + /// Panics if any triple element is out of range or if any triple has duplicate elements. + pub fn new(num_elements: usize, triples: Vec<(usize, usize, usize)>) -> Self { + Self::try_new(num_elements, triples).unwrap_or_else(|message| panic!("{message}")) + } + + /// Number of elements in the set A. + pub fn num_elements(&self) -> usize { + self.num_elements + } + + /// Number of cyclic ordering triples. + pub fn num_triples(&self) -> usize { + self.triples.len() + } + + /// The collection of ordered triples. + pub fn triples(&self) -> &[(usize, usize, usize)] { + &self.triples + } + + /// Check whether a configuration represents a valid permutation and + /// satisfies all cyclic ordering constraints. + fn is_valid_solution(&self, config: &[usize]) -> bool { + if config.len() != self.num_elements { + return false; + } + + // Check that config is a valid permutation of 0..n + let n = self.num_elements; + let mut seen = vec![false; n]; + for &pos in config { + if pos >= n || seen[pos] { + return false; + } + seen[pos] = true; + } + + // Check cyclic ordering constraints: for each (a, b, c), + // (fa < fb < fc) OR (fb < fc < fa) OR (fc < fa < fb) + for &(a, b, c) in &self.triples { + if !is_cyclic_order(config[a], config[b], config[c]) { + return false; + } + } + + true + } +} + +/// Check whether three distinct values appear in cyclic order: +/// (a < b < c) OR (b < c < a) OR (c < a < b). +#[allow(clippy::nonminimal_bool)] +fn is_cyclic_order(a: usize, b: usize, c: usize) -> bool { + (a < b && b < c) || (b < c && c < a) || (c < a && a < b) +} + +#[derive(Deserialize)] +struct CyclicOrderingData { + num_elements: usize, + triples: Vec<(usize, usize, usize)>, +} + +impl<'de> Deserialize<'de> for CyclicOrdering { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let data = CyclicOrderingData::deserialize(deserializer)?; + Self::try_new(data.num_elements, data.triples).map_err(D::Error::custom) + } +} + +impl Problem for CyclicOrdering { + const NAME: &'static str = "CyclicOrdering"; + type Value = Or; + + fn variant() -> Vec<(&'static str, &'static str)> { + crate::variant_params![] + } + + fn dims(&self) -> Vec { + vec![self.num_elements; self.num_elements] + } + + fn evaluate(&self, config: &[usize]) -> Or { + Or(self.is_valid_solution(config)) + } +} + +crate::declare_variants! { + default CyclicOrdering => "factorial(num_elements)", +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_model_example_specs() -> Vec { + vec![crate::example_db::specs::ModelExampleSpec { + id: "cyclic_ordering", + instance: Box::new(CyclicOrdering::new( + 5, + vec![(0, 1, 2), (2, 3, 0), (1, 3, 4)], + )), + optimal_config: vec![1, 3, 4, 0, 2], + optimal_value: serde_json::json!(true), + }] +} + +#[cfg(test)] +#[path = "../../unit_tests/models/misc/cyclic_ordering.rs"] +mod tests; diff --git a/src/models/misc/dynamic_storage_allocation.rs b/src/models/misc/dynamic_storage_allocation.rs new file mode 100644 index 00000000..adcba4d9 --- /dev/null +++ b/src/models/misc/dynamic_storage_allocation.rs @@ -0,0 +1,189 @@ +//! Dynamic Storage Allocation problem implementation. +//! +//! Given items each with arrival time, departure time, and size, plus a +//! memory size D, determine whether each item can be assigned a starting +//! address such that every item fits within [0, D-1] and no two +//! time-overlapping items share memory addresses. + +use crate::registry::{FieldInfo, ProblemSchemaEntry, ProblemSizeFieldEntry}; +use crate::traits::Problem; +use crate::types::Or; +use serde::de::Error as _; +use serde::{Deserialize, Deserializer, Serialize}; + +inventory::submit! { + ProblemSchemaEntry { + name: "DynamicStorageAllocation", + display_name: "Dynamic Storage Allocation", + aliases: &[], + dimensions: &[], + module_path: module_path!(), + description: "Assign starting addresses for items with time intervals and sizes within bounded memory", + fields: &[ + FieldInfo { name: "items", type_name: "Vec<(usize, usize, usize)>", description: "Items as (arrival, departure, size) tuples" }, + FieldInfo { name: "memory_size", type_name: "usize", description: "Total memory size D" }, + ], + } +} + +inventory::submit! { + ProblemSizeFieldEntry { + name: "DynamicStorageAllocation", + fields: &["num_items", "memory_size"], + } +} + +/// Dynamic Storage Allocation problem. +/// +/// Each item `a` has arrival time `r(a)`, departure time `d(a)`, and size `s(a)`. +/// The goal is to find a starting address `σ(a) ∈ {0, ..., D - s(a)}` for each item +/// such that time-overlapping items do not overlap in memory. +#[derive(Debug, Clone, Serialize)] +pub struct DynamicStorageAllocation { + items: Vec<(usize, usize, usize)>, + memory_size: usize, +} + +impl DynamicStorageAllocation { + fn validate_inputs(items: &[(usize, usize, usize)], memory_size: usize) -> Result<(), String> { + if items.is_empty() { + return Err("DynamicStorageAllocation requires at least one item".to_string()); + } + if memory_size == 0 { + return Err("DynamicStorageAllocation requires a positive memory_size".to_string()); + } + for (i, &(arrival, departure, size)) in items.iter().enumerate() { + if size == 0 { + return Err(format!("Item {i} has zero size; all sizes must be >= 1")); + } + if departure <= arrival { + return Err(format!( + "Item {i} has departure ({departure}) <= arrival ({arrival}); departure must be strictly greater" + )); + } + if size > memory_size { + return Err(format!( + "Item {i} has size ({size}) > memory_size ({memory_size}); every item must fit in memory" + )); + } + } + Ok(()) + } + + /// Try to create a new `DynamicStorageAllocation` instance. + pub fn try_new(items: Vec<(usize, usize, usize)>, memory_size: usize) -> Result { + Self::validate_inputs(&items, memory_size)?; + Ok(Self { items, memory_size }) + } + + /// Create a new `DynamicStorageAllocation` instance. + /// + /// # Panics + /// + /// Panics if any item has zero size, departure <= arrival, or size > memory_size. + pub fn new(items: Vec<(usize, usize, usize)>, memory_size: usize) -> Self { + Self::try_new(items, memory_size).unwrap_or_else(|message| panic!("{message}")) + } + + /// The items as `(arrival, departure, size)` tuples. + pub fn items(&self) -> &[(usize, usize, usize)] { + &self.items + } + + /// The total memory size D. + pub fn memory_size(&self) -> usize { + self.memory_size + } + + /// The number of items. + pub fn num_items(&self) -> usize { + self.items.len() + } +} + +#[derive(Deserialize)] +struct DynamicStorageAllocationData { + items: Vec<(usize, usize, usize)>, + memory_size: usize, +} + +impl<'de> Deserialize<'de> for DynamicStorageAllocation { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let data = DynamicStorageAllocationData::deserialize(deserializer)?; + Self::try_new(data.items, data.memory_size).map_err(D::Error::custom) + } +} + +impl Problem for DynamicStorageAllocation { + const NAME: &'static str = "DynamicStorageAllocation"; + type Value = Or; + + fn variant() -> Vec<(&'static str, &'static str)> { + crate::variant_params![] + } + + fn dims(&self) -> Vec { + self.items + .iter() + .map(|&(_, _, s)| self.memory_size - s + 1) + .collect() + } + + fn evaluate(&self, config: &[usize]) -> Or { + Or({ + if config.len() != self.num_items() { + return Or(false); + } + + // Check each item fits within memory + for (i, &(_, _, size)) in self.items.iter().enumerate() { + let start = config[i]; + if start + size > self.memory_size { + return Or(false); + } + } + + // Check all pairs of time-overlapping items for memory non-overlap + for (i, &(r_i, d_i, s_i)) in self.items.iter().enumerate() { + let sigma_i = config[i]; + for (j, &(r_j, d_j, s_j)) in self.items.iter().enumerate().skip(i + 1) { + // Time overlap: r_i < d_j AND r_j < d_i + if r_i < d_j && r_j < d_i { + let sigma_j = config[j]; + // Memory overlap: NOT (sigma_i + s_i <= sigma_j OR sigma_j + s_j <= sigma_i) + let no_memory_overlap = + sigma_i + s_i <= sigma_j || sigma_j + s_j <= sigma_i; + if !no_memory_overlap { + return Or(false); + } + } + } + } + true + }) + } +} + +crate::declare_variants! { + default DynamicStorageAllocation => "(memory_size + 1)^num_items", +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_model_example_specs() -> Vec { + vec![crate::example_db::specs::ModelExampleSpec { + id: "dynamic_storage_allocation", + instance: Box::new(DynamicStorageAllocation::new( + vec![(0, 3, 2), (0, 2, 3), (1, 4, 1), (2, 5, 3), (3, 5, 2)], + 6, + )), + optimal_config: vec![0, 2, 5, 2, 0], + optimal_value: serde_json::json!(true), + }] +} + +#[cfg(test)] +#[path = "../../unit_tests/models/misc/dynamic_storage_allocation.rs"] +mod tests; diff --git a/src/models/misc/feasible_register_assignment.rs b/src/models/misc/feasible_register_assignment.rs new file mode 100644 index 00000000..651b5ec3 --- /dev/null +++ b/src/models/misc/feasible_register_assignment.rs @@ -0,0 +1,256 @@ +//! Feasible Register Assignment problem implementation. +//! +//! Given a directed acyclic graph G = (V, A), K registers, and a fixed +//! register assignment f: V → {0, ..., K-1}, determine whether there +//! exists a topological ordering of the vertices such that no register +//! conflict arises during execution. NP-complete [Bouchez et al., 2006]. + +use crate::registry::{FieldInfo, ProblemSchemaEntry}; +use crate::traits::Problem; +use serde::{Deserialize, Serialize}; + +inventory::submit! { + ProblemSchemaEntry { + name: "FeasibleRegisterAssignment", + display_name: "Feasible Register Assignment", + aliases: &[], + dimensions: &[], + module_path: module_path!(), + description: "Determine whether a DAG computation can be scheduled without register conflicts under a fixed assignment", + fields: &[ + FieldInfo { name: "num_vertices", type_name: "usize", description: "Number of vertices n = |V|" }, + FieldInfo { name: "arcs", type_name: "Vec<(usize, usize)>", description: "Directed arcs (v, u) meaning v depends on u" }, + FieldInfo { name: "num_registers", type_name: "usize", description: "Number of registers K" }, + FieldInfo { name: "assignment", type_name: "Vec", description: "Register assignment f(v) for each vertex" }, + ], + } +} + +/// The Feasible Register Assignment problem. +/// +/// Given a directed acyclic graph G = (V, A) where arcs represent data +/// dependencies, K registers, and an assignment f: V → {0, ..., K-1}, +/// determine whether there exists a topological evaluation ordering such +/// that no two simultaneously live values share the same register. +/// +/// # Representation +/// +/// An arc `(v, u)` means vertex `v` depends on vertex `u` (i.e., `u` must +/// be computed before `v`). Each variable represents a vertex, with domain +/// `{0, ..., n-1}` giving its evaluation position (the config must be a +/// valid permutation). +/// +/// # Example +/// +/// ``` +/// use problemreductions::models::misc::FeasibleRegisterAssignment; +/// use problemreductions::{Problem, Solver, BruteForce}; +/// +/// // 4 vertices: v0 depends on v1 and v2, v1 depends on v3 +/// let problem = FeasibleRegisterAssignment::new( +/// 4, +/// vec![(0, 1), (0, 2), (1, 3)], +/// 2, +/// vec![0, 1, 0, 0], +/// ); +/// let solver = BruteForce::new(); +/// let solution = solver.find_witness(&problem); +/// assert!(solution.is_some()); +/// ``` +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct FeasibleRegisterAssignment { + /// Number of vertices. + num_vertices: usize, + /// Directed arcs (v, u) meaning v depends on u. + arcs: Vec<(usize, usize)>, + /// Number of registers K. + num_registers: usize, + /// Register assignment f(v) for each vertex. + assignment: Vec, +} + +impl FeasibleRegisterAssignment { + /// Create a new Feasible Register Assignment instance. + /// + /// # Panics + /// + /// Panics if any arc index is out of bounds (>= num_vertices), + /// if any arc is a self-loop, if the assignment length does not + /// match num_vertices, or if any assignment value >= num_registers. + pub fn new( + num_vertices: usize, + arcs: Vec<(usize, usize)>, + num_registers: usize, + assignment: Vec, + ) -> Self { + for &(v, u) in &arcs { + assert!( + v < num_vertices && u < num_vertices, + "Arc ({}, {}) out of bounds for {} vertices", + v, + u, + num_vertices + ); + assert!(v != u, "Self-loop ({}, {}) not allowed in a DAG", v, u); + } + assert_eq!( + assignment.len(), + num_vertices, + "Assignment length {} does not match num_vertices {}", + assignment.len(), + num_vertices + ); + for (v, &r) in assignment.iter().enumerate() { + assert!( + r < num_registers || num_registers == 0, + "Assignment[{}] = {} is out of bounds for {} registers", + v, + r, + num_registers + ); + } + Self { + num_vertices, + arcs, + num_registers, + assignment, + } + } + + /// Get the number of vertices. + pub fn num_vertices(&self) -> usize { + self.num_vertices + } + + /// Get the number of arcs. + pub fn num_arcs(&self) -> usize { + self.arcs.len() + } + + /// Get the number of registers. + pub fn num_registers(&self) -> usize { + self.num_registers + } + + /// Get the arcs. + pub fn arcs(&self) -> &[(usize, usize)] { + &self.arcs + } + + /// Get the register assignment. + pub fn assignment(&self) -> &[usize] { + &self.assignment + } + + /// Check whether the given config (position assignment) is feasible. + /// + /// Returns `true` if the config is a valid permutation, respects + /// topological ordering, and has no register conflicts. + pub fn is_feasible(&self, config: &[usize]) -> bool { + let n = self.num_vertices; + if config.len() != n { + return false; + } + + // Check valid permutation: each position 0..n-1 used exactly once + let mut order = vec![0usize; n]; // order[position] = vertex + let mut used = vec![false; n]; + for (vertex, &position) in config.iter().enumerate() { + if position >= n { + return false; + } + if used[position] { + return false; + } + used[position] = true; + order[position] = vertex; + } + + // Build dependency info + let mut dependencies: Vec> = vec![vec![]; n]; + let mut dependents: Vec> = vec![vec![]; n]; + for &(v, u) in &self.arcs { + dependencies[v].push(u); + dependents[u].push(v); + } + + // Check topological ordering and register conflicts + let mut computed = vec![false; n]; + + for step in 0..n { + let vertex = order[step]; + + // Check dependencies: all dependencies must have been computed + for &dep in &dependencies[vertex] { + if !computed[dep] { + return false; + } + } + + // Check register conflict: the register assigned to this vertex + // must not be currently occupied by a live value. + // A previously computed vertex w is "live" if: + // - assignment[w] == assignment[vertex] (same register) + // - w has at least one dependent (other than vertex) that hasn't + // been computed yet. The current vertex is consuming w's value + // at this step, so we exclude it from the liveness check. + let reg = self.assignment[vertex]; + for &w in &order[..step] { + if self.assignment[w] == reg { + let still_live = dependents[w].iter().any(|&d| d != vertex && !computed[d]); + if still_live { + return false; + } + } + } + + computed[vertex] = true; + } + + true + } +} + +impl Problem for FeasibleRegisterAssignment { + const NAME: &'static str = "FeasibleRegisterAssignment"; + type Value = crate::types::Or; + + fn variant() -> Vec<(&'static str, &'static str)> { + crate::variant_params![] + } + + fn dims(&self) -> Vec { + vec![self.num_vertices; self.num_vertices] + } + + fn evaluate(&self, config: &[usize]) -> crate::types::Or { + crate::types::Or(self.is_feasible(config)) + } +} + +crate::declare_variants! { + default FeasibleRegisterAssignment => "num_vertices ^ 2 * 2 ^ num_vertices", +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_model_example_specs() -> Vec { + vec![crate::example_db::specs::ModelExampleSpec { + id: "feasible_register_assignment", + // 4 vertices, arcs: (0,1),(0,2),(1,3), K=2, assignment [0,1,0,0] + // Valid order: v3, v1, v2, v0 -> config [3, 1, 2, 0] + instance: Box::new(FeasibleRegisterAssignment::new( + 4, + vec![(0, 1), (0, 2), (1, 3)], + 2, + vec![0, 1, 0, 0], + )), + // config[v] = position: v0 at pos 3, v1 at pos 1, v2 at pos 2, v3 at pos 0 + // Order: v3(pos0), v1(pos1), v2(pos2), v0(pos3) + optimal_config: vec![3, 1, 2, 0], + optimal_value: serde_json::json!(true), + }] +} + +#[cfg(test)] +#[path = "../../unit_tests/models/misc/feasible_register_assignment.rs"] +mod tests; diff --git a/src/models/misc/mod.rs b/src/models/misc/mod.rs index 9a0f323e..4146b1a6 100644 --- a/src/models/misc/mod.rs +++ b/src/models/misc/mod.rs @@ -2,19 +2,24 @@ //! //! Problems with unique input structures that don't fit other categories: //! - [`AdditionalKey`]: Determine whether a relational schema has an additional candidate key +//! - [`Betweenness`]: Find a linear ordering satisfying betweenness constraints on triples //! - [`BinPacking`]: Bin Packing (minimize bins) +//! - [`CyclicOrdering`]: Find a permutation satisfying cyclic ordering constraints on triples //! - [`BoyceCoddNormalFormViolation`]: Boyce-Codd Normal Form Violation (BCNF) //! - [`ConsistencyOfDatabaseFrequencyTables`]: Pairwise frequency-table consistency //! - [`ConjunctiveBooleanQuery`]: Evaluate a conjunctive Boolean query over relations //! - [`ConjunctiveQueryFoldability`]: Conjunctive Query Foldability +//! - [`DynamicStorageAllocation`]: Assign starting addresses in bounded memory for time-varying items //! - [`ExpectedRetrievalCost`]: Allocate records to circular sectors within a latency bound //! - [`Factoring`]: Integer factorization +//! - [`FeasibleRegisterAssignment`]: Determine if a DAG computation can be scheduled without register conflicts under a fixed assignment //! - [`IntegerExpressionMembership`]: Membership in a set defined by an integer expression tree //! - [`FlowShopScheduling`]: Flow Shop Scheduling (meet deadline on m processors) //! - [`GroupingBySwapping`]: Group equal symbols into contiguous blocks by adjacent swaps //! - [`JobShopScheduling`]: Minimize makespan with per-job processor routes //! - [`Knapsack`]: 0-1 Knapsack (maximize value subject to weight capacity) //! - [`MultiprocessorScheduling`]: Schedule tasks on processors to meet a deadline +//! - [`Numerical3DimensionalMatching`]: Partition W∪X∪Y into m triples each summing to B //! - [`OpenShopScheduling`]: Open Shop Scheduling (minimize makespan, free task order per job) //! - [`LongestCommonSubsequence`]: Longest Common Subsequence //! - [`MinimumExternalMacroDataCompression`]: Minimize compression cost using external dictionary @@ -22,6 +27,7 @@ //! - [`MinimumTardinessSequencing`]: Minimize tardy tasks in single-machine scheduling //! - [`PaintShop`]: Minimize color switches in paint shop scheduling //! - [`CosineProductIntegration`]: Balanced sign assignment for integer frequencies +//! - [`NonLivenessFreePetriNet`]: Determine whether a free-choice Petri net is not live //! - [`Partition`]: Partition a multiset into two equal-sum subsets //! - [`PartiallyOrderedKnapsack`]: Knapsack with precedence constraints //! - [`PrecedenceConstrainedScheduling`]: Schedule unit tasks on processors by deadline @@ -42,10 +48,13 @@ //! - [`ShortestCommonSupersequence`]: Find a common supersequence of bounded length //! - [`TimetableDesign`]: Schedule craftsmen on tasks across work periods //! - [`StringToStringCorrection`]: String-to-String Correction (derive target via deletions and swaps) +//! - [`SubsetProduct`]: Find a subset whose product equals exactly a target value //! - [`SubsetSum`]: Find a subset summing to exactly a target value //! - [`SumOfSquaresPartition`]: Partition integers into K groups minimizing sum of squared group sums pub(crate) mod additional_key; +mod betweenness; +mod cyclic_ordering; /// Decode a Lehmer code into a permutation of `0..n`. /// @@ -78,9 +87,11 @@ pub(crate) mod conjunctive_boolean_query; pub(crate) mod conjunctive_query_foldability; mod consistency_of_database_frequency_tables; mod cosine_product_integration; +mod dynamic_storage_allocation; mod ensemble_computation; pub(crate) mod expected_retrieval_cost; pub(crate) mod factoring; +mod feasible_register_assignment; mod flow_shop_scheduling; mod grouping_by_swapping; pub(crate) mod integer_expression_membership; @@ -92,6 +103,8 @@ mod minimum_external_macro_data_compression; mod minimum_internal_macro_data_compression; mod minimum_tardiness_sequencing; mod multiprocessor_scheduling; +mod non_liveness_free_petri_net; +mod numerical_3_dimensional_matching; mod open_shop_scheduling; pub(crate) mod paintshop; pub(crate) mod partially_ordered_knapsack; @@ -115,12 +128,14 @@ pub(crate) mod shortest_common_supersequence; mod stacker_crane; mod staff_scheduling; pub(crate) mod string_to_string_correction; +mod subset_product; mod subset_sum; pub(crate) mod sum_of_squares_partition; mod three_partition; mod timetable_design; pub use additional_key::AdditionalKey; +pub use betweenness::Betweenness; pub use bin_packing::BinPacking; pub use boyce_codd_normal_form_violation::BoyceCoddNormalFormViolation; pub use capacity_assignment::CapacityAssignment; @@ -130,9 +145,12 @@ pub use consistency_of_database_frequency_tables::{ ConsistencyOfDatabaseFrequencyTables, FrequencyTable, KnownValue, }; pub use cosine_product_integration::CosineProductIntegration; +pub use cyclic_ordering::CyclicOrdering; +pub use dynamic_storage_allocation::DynamicStorageAllocation; pub use ensemble_computation::EnsembleComputation; pub use expected_retrieval_cost::ExpectedRetrievalCost; pub use factoring::Factoring; +pub use feasible_register_assignment::FeasibleRegisterAssignment; pub use flow_shop_scheduling::FlowShopScheduling; pub use grouping_by_swapping::GroupingBySwapping; pub use integer_expression_membership::{IntExpr, IntegerExpressionMembership}; @@ -144,6 +162,8 @@ pub use minimum_external_macro_data_compression::MinimumExternalMacroDataCompres pub use minimum_internal_macro_data_compression::MinimumInternalMacroDataCompression; pub use minimum_tardiness_sequencing::MinimumTardinessSequencing; pub use multiprocessor_scheduling::MultiprocessorScheduling; +pub use non_liveness_free_petri_net::NonLivenessFreePetriNet; +pub use numerical_3_dimensional_matching::Numerical3DimensionalMatching; pub use open_shop_scheduling::OpenShopScheduling; pub use paintshop::PaintShop; pub use partially_ordered_knapsack::PartiallyOrderedKnapsack; @@ -167,6 +187,7 @@ pub use shortest_common_supersequence::ShortestCommonSupersequence; pub use stacker_crane::StackerCrane; pub use staff_scheduling::StaffScheduling; pub use string_to_string_correction::StringToStringCorrection; +pub use subset_product::SubsetProduct; pub use subset_sum::SubsetSum; pub use sum_of_squares_partition::SumOfSquaresPartition; pub use three_partition::ThreePartition; @@ -216,13 +237,20 @@ pub(crate) fn canonical_model_example_specs() -> Vec", description: "Arcs from places to transitions" }, + FieldInfo { name: "transition_to_place", type_name: "Vec<(usize,usize)>", description: "Arcs from transitions to places" }, + FieldInfo { name: "initial_marking", type_name: "Vec", description: "Initial marking M₀ (tokens per place)" }, + ], + } +} + +inventory::submit! { + ProblemSizeFieldEntry { + name: "NonLivenessFreePetriNet", + fields: &["num_places", "num_transitions", "num_arcs", "initial_token_sum"], + } +} + +#[derive(Debug, Clone, Serialize)] +pub struct NonLivenessFreePetriNet { + num_places: usize, + num_transitions: usize, + place_to_transition: Vec<(usize, usize)>, + transition_to_place: Vec<(usize, usize)>, + initial_marking: Vec, +} + +impl NonLivenessFreePetriNet { + fn validate_inputs( + num_places: usize, + num_transitions: usize, + place_to_transition: &[(usize, usize)], + transition_to_place: &[(usize, usize)], + initial_marking: &[usize], + ) -> Result<(), String> { + if num_places == 0 { + return Err("NonLivenessFreePetriNet requires at least one place".to_string()); + } + if num_transitions == 0 { + return Err("NonLivenessFreePetriNet requires at least one transition".to_string()); + } + if initial_marking.len() != num_places { + return Err(format!( + "initial_marking length {} does not match num_places {}", + initial_marking.len(), + num_places + )); + } + for (i, &(p, t)) in place_to_transition.iter().enumerate() { + if p >= num_places { + return Err(format!( + "place_to_transition arc {} has place {} out of range 0..{}", + i, p, num_places + )); + } + if t >= num_transitions { + return Err(format!( + "place_to_transition arc {} has transition {} out of range 0..{}", + i, t, num_transitions + )); + } + } + for (i, &(t, p)) in transition_to_place.iter().enumerate() { + if t >= num_transitions { + return Err(format!( + "transition_to_place arc {} has transition {} out of range 0..{}", + i, t, num_transitions + )); + } + if p >= num_places { + return Err(format!( + "transition_to_place arc {} has place {} out of range 0..{}", + i, p, num_places + )); + } + } + + // Validate free-choice property: for any two transitions sharing an + // input place, they must share ALL input places (identical preset). + let mut preset: HashMap> = HashMap::new(); + for &(p, t) in place_to_transition { + preset.entry(t).or_default().insert(p); + } + // Group transitions by shared input places + for &(p, _) in place_to_transition { + let transitions_from_p: Vec = place_to_transition + .iter() + .filter(|&&(pp, _)| pp == p) + .map(|&(_, t)| t) + .collect(); + for i in 0..transitions_from_p.len() { + for j in (i + 1)..transitions_from_p.len() { + let t1 = transitions_from_p[i]; + let t2 = transitions_from_p[j]; + let p1 = preset.get(&t1).cloned().unwrap_or_default(); + let p2 = preset.get(&t2).cloned().unwrap_or_default(); + if p1 != p2 { + return Err(format!( + "Free-choice violation: transitions {} and {} share input place {} but have different presets", + t1, t2, p + )); + } + } + } + } + + Ok(()) + } + + /// Try to create a new `NonLivenessFreePetriNet` instance, returning an error + /// if validation fails. + pub fn try_new( + num_places: usize, + num_transitions: usize, + place_to_transition: Vec<(usize, usize)>, + transition_to_place: Vec<(usize, usize)>, + initial_marking: Vec, + ) -> Result { + Self::validate_inputs( + num_places, + num_transitions, + &place_to_transition, + &transition_to_place, + &initial_marking, + )?; + Ok(Self { + num_places, + num_transitions, + place_to_transition, + transition_to_place, + initial_marking, + }) + } + + /// Create a new `NonLivenessFreePetriNet` instance. + /// + /// # Panics + /// + /// Panics if validation fails (indices out of range, wrong marking length, + /// or free-choice violation). + pub fn new( + num_places: usize, + num_transitions: usize, + place_to_transition: Vec<(usize, usize)>, + transition_to_place: Vec<(usize, usize)>, + initial_marking: Vec, + ) -> Self { + Self::try_new( + num_places, + num_transitions, + place_to_transition, + transition_to_place, + initial_marking, + ) + .unwrap_or_else(|message| panic!("{message}")) + } + + /// Number of places |S|. + pub fn num_places(&self) -> usize { + self.num_places + } + + /// Number of transitions |T|. + pub fn num_transitions(&self) -> usize { + self.num_transitions + } + + /// Total number of arcs |F|. + pub fn num_arcs(&self) -> usize { + self.place_to_transition.len() + self.transition_to_place.len() + } + + /// Sum of tokens in the initial marking. + pub fn initial_token_sum(&self) -> usize { + self.initial_marking.iter().sum() + } + + /// Arcs from places to transitions. + pub fn place_to_transition(&self) -> &[(usize, usize)] { + &self.place_to_transition + } + + /// Arcs from transitions to places. + pub fn transition_to_place(&self) -> &[(usize, usize)] { + &self.transition_to_place + } + + /// Initial marking M₀. + pub fn initial_marking(&self) -> &[usize] { + &self.initial_marking + } + + /// Determine which transitions are enabled at the given marking. + fn enabled_transitions(&self, marking: &[usize]) -> Vec { + let mut enabled = vec![true; self.num_transitions]; + // A transition t is enabled iff every input place has at least one token. + // First, mark all transitions that have at least one input place. + let mut has_input = vec![false; self.num_transitions]; + for &(p, t) in &self.place_to_transition { + has_input[t] = true; + if marking[p] == 0 { + enabled[t] = false; + } + } + // Transitions with no input places are always enabled (source transitions). + // They remain true in the enabled vector. + // But we need to handle the case where has_input is false: leave enabled as true. + let _ = has_input; // used implicitly above + enabled + } + + /// Fire a transition, producing a new marking. Returns None if not enabled. + fn fire(&self, marking: &[usize], transition: usize) -> Option> { + let mut new_marking = marking.to_vec(); + // Remove tokens from input places + for &(p, t) in &self.place_to_transition { + if t == transition { + if new_marking[p] == 0 { + return None; + } + new_marking[p] -= 1; + } + } + // Add tokens to output places + for &(t, p) in &self.transition_to_place { + if t == transition { + new_marking[p] += 1; + } + } + Some(new_marking) + } + + /// Build the bounded reachability graph and determine which transitions + /// are globally dead (i.e., there exists a reachable marking from which + /// the transition can never fire again). + /// + /// For boundedness, we cap exploration at markings where no place exceeds + /// `initial_token_sum`. This is sound for free-choice nets under the + /// NP-completeness assumption from Garey & Johnson. + fn compute_globally_dead_transitions(&self) -> Vec { + let token_cap = self.initial_token_sum(); + let num_t = self.num_transitions; + + // Build reachability graph: BFS from initial marking. + let mut marking_index: HashMap, usize> = HashMap::new(); + let mut markings: Vec> = Vec::new(); + // successors[m_idx] = list of (transition, next_marking_idx) + let mut successors: Vec> = Vec::new(); + let mut queue: VecDeque = VecDeque::new(); + + let initial = self.initial_marking.clone(); + marking_index.insert(initial.clone(), 0); + markings.push(initial); + successors.push(Vec::new()); + queue.push_back(0); + + while let Some(m_idx) = queue.pop_front() { + let enabled = self.enabled_transitions(&markings[m_idx]); + for (t, &is_enabled) in enabled.iter().enumerate() { + if !is_enabled { + continue; + } + if let Some(new_marking) = self.fire(&markings[m_idx], t) { + // Check bound: no place exceeds token_cap + if new_marking.iter().any(|&tokens| tokens > token_cap) { + continue; + } + let next_idx = if let Some(&idx) = marking_index.get(&new_marking) { + idx + } else { + let idx = markings.len(); + marking_index.insert(new_marking.clone(), idx); + markings.push(new_marking); + successors.push(Vec::new()); + queue.push_back(idx); + idx + }; + successors[m_idx].push((t, next_idx)); + } + } + } + + let num_markings = markings.len(); + + // For each transition t, find the set of markings from which t can + // eventually fire (via BFS on the reachability graph). + // A transition is globally dead iff there exists a reachable marking + // NOT in this set. + // + // We compute this by backward BFS: starting from markings where t fires, + // propagate backward through all transitions. + let mut globally_dead = vec![false; num_t]; + + // Build reverse adjacency once (shared across all transitions). + let mut predecessors: Vec> = vec![Vec::new(); num_markings]; + for (m_idx, succs) in successors.iter().enumerate() { + for &(_tr, next_idx) in succs { + predecessors[next_idx].push(m_idx); + } + } + + for (t, dead) in globally_dead.iter_mut().enumerate() { + // Find markings where transition t is directly fired + // (i.e., markings that have an outgoing edge for transition t) + let mut can_reach_t = vec![false; num_markings]; + let mut bfs_queue: VecDeque = VecDeque::new(); + + for (m_idx, succs) in successors.iter().enumerate() { + if succs.iter().any(|&(tr, _)| tr == t) { + can_reach_t[m_idx] = true; + bfs_queue.push_back(m_idx); + } + } + + // Backward BFS: from which markings can we reach a marking where t fires? + while let Some(m_idx) = bfs_queue.pop_front() { + for &pred_idx in &predecessors[m_idx] { + if !can_reach_t[pred_idx] { + can_reach_t[pred_idx] = true; + bfs_queue.push_back(pred_idx); + } + } + } + + // t is globally dead iff some reachable marking cannot reach a firing of t + if can_reach_t.iter().any(|&reached| !reached) { + *dead = true; + } + } + + globally_dead + } +} + +#[derive(Deserialize)] +struct NonLivenessFreePetriNetData { + num_places: usize, + num_transitions: usize, + place_to_transition: Vec<(usize, usize)>, + transition_to_place: Vec<(usize, usize)>, + initial_marking: Vec, +} + +impl<'de> Deserialize<'de> for NonLivenessFreePetriNet { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let data = NonLivenessFreePetriNetData::deserialize(deserializer)?; + Self::try_new( + data.num_places, + data.num_transitions, + data.place_to_transition, + data.transition_to_place, + data.initial_marking, + ) + .map_err(D::Error::custom) + } +} + +impl Problem for NonLivenessFreePetriNet { + const NAME: &'static str = "NonLivenessFreePetriNet"; + type Value = Or; + + fn variant() -> Vec<(&'static str, &'static str)> { + crate::variant_params![] + } + + fn dims(&self) -> Vec { + vec![2; self.num_transitions] + } + + fn evaluate(&self, config: &[usize]) -> Or { + if config.len() != self.num_transitions { + return Or(false); + } + + let globally_dead = self.compute_globally_dead_transitions(); + + // Config selects transitions claimed to be dead. + // Return true iff at least one selected transition is indeed globally dead. + for (t, &selected) in config.iter().enumerate() { + if selected == 1 && globally_dead[t] { + return Or(true); + } + } + + Or(false) + } +} + +crate::declare_variants! { + default NonLivenessFreePetriNet => "(initial_token_sum + 1) ^ num_places * num_transitions", +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_model_example_specs() -> Vec { + vec![crate::example_db::specs::ModelExampleSpec { + id: "non_liveness_free_petri_net", + instance: Box::new(NonLivenessFreePetriNet::new( + 4, + 3, + vec![(0, 0), (1, 1), (2, 2)], + vec![(0, 1), (1, 2), (2, 3)], + vec![1, 0, 0, 0], + )), + optimal_config: vec![1, 1, 1], + optimal_value: serde_json::json!(true), + }] +} + +#[cfg(test)] +#[path = "../../unit_tests/models/misc/non_liveness_free_petri_net.rs"] +mod tests; diff --git a/src/models/misc/numerical_3_dimensional_matching.rs b/src/models/misc/numerical_3_dimensional_matching.rs new file mode 100644 index 00000000..c657f513 --- /dev/null +++ b/src/models/misc/numerical_3_dimensional_matching.rs @@ -0,0 +1,235 @@ +//! Numerical 3-Dimensional Matching (N3DM) problem implementation. +//! +//! Given disjoint sets W, X, Y each with m elements, sizes s(a) ∈ Z⁺ for +//! every element with B/4 < s(a) < B/2, and a bound B where the total sum +//! equals mB. Decide whether W ∪ X ∪ Y can be partitioned into m triples, +//! each containing one element from W, X, and Y, with each triple summing +//! to exactly B. + +use crate::registry::{FieldInfo, ProblemSchemaEntry, ProblemSizeFieldEntry}; +use crate::traits::Problem; +use crate::types::Or; +use serde::de::Error as _; +use serde::{Deserialize, Deserializer, Serialize}; + +inventory::submit! { + ProblemSchemaEntry { + name: "Numerical3DimensionalMatching", + display_name: "Numerical 3-Dimensional Matching", + aliases: &["N3DM"], + dimensions: &[], + module_path: module_path!(), + description: "Partition W∪X∪Y into m triples (one from each set) each summing to B", + fields: &[ + FieldInfo { name: "sizes_w", type_name: "Vec", description: "Positive integer sizes for each element of W" }, + FieldInfo { name: "sizes_x", type_name: "Vec", description: "Positive integer sizes for each element of X" }, + FieldInfo { name: "sizes_y", type_name: "Vec", description: "Positive integer sizes for each element of Y" }, + FieldInfo { name: "bound", type_name: "u64", description: "Target sum B for each triple" }, + ], + } +} + +inventory::submit! { + ProblemSizeFieldEntry { + name: "Numerical3DimensionalMatching", + fields: &["num_groups", "bound"], + } +} + +#[derive(Debug, Clone, Serialize)] +pub struct Numerical3DimensionalMatching { + sizes_w: Vec, + sizes_x: Vec, + sizes_y: Vec, + bound: u64, +} + +impl Numerical3DimensionalMatching { + fn validate_inputs( + sizes_w: &[u64], + sizes_x: &[u64], + sizes_y: &[u64], + bound: u64, + ) -> Result<(), String> { + let m = sizes_w.len(); + if m == 0 { + return Err( + "Numerical3DimensionalMatching requires at least one element per set".to_string(), + ); + } + if sizes_x.len() != m || sizes_y.len() != m { + return Err( + "Numerical3DimensionalMatching requires all three sets to have the same size" + .to_string(), + ); + } + if bound == 0 { + return Err("Numerical3DimensionalMatching requires a positive bound".to_string()); + } + + let bound128 = u128::from(bound); + for &size in sizes_w.iter().chain(sizes_x.iter()).chain(sizes_y.iter()) { + if size == 0 { + return Err("All sizes must be positive (> 0)".to_string()); + } + let size128 = u128::from(size); + if !(4 * size128 > bound128 && 2 * size128 < bound128) { + return Err("Every size must lie strictly between B/4 and B/2".to_string()); + } + } + + let total_sum: u128 = sizes_w + .iter() + .chain(sizes_x.iter()) + .chain(sizes_y.iter()) + .map(|&s| u128::from(s)) + .sum(); + let expected_sum = bound128 * (m as u128); + if total_sum != expected_sum { + return Err("Total sum of all sizes must equal m * bound".to_string()); + } + if total_sum > u128::from(u64::MAX) { + return Err("Total sum exceeds u64 range".to_string()); + } + + Ok(()) + } + + pub fn try_new( + sizes_w: Vec, + sizes_x: Vec, + sizes_y: Vec, + bound: u64, + ) -> Result { + Self::validate_inputs(&sizes_w, &sizes_x, &sizes_y, bound)?; + Ok(Self { + sizes_w, + sizes_x, + sizes_y, + bound, + }) + } + + /// Create a new Numerical 3-Dimensional Matching instance. + /// + /// # Panics + /// + /// Panics if the input violates the N3DM invariants. + pub fn new(sizes_w: Vec, sizes_x: Vec, sizes_y: Vec, bound: u64) -> Self { + Self::try_new(sizes_w, sizes_x, sizes_y, bound) + .unwrap_or_else(|message| panic!("{message}")) + } + + pub fn sizes_w(&self) -> &[u64] { + &self.sizes_w + } + + pub fn sizes_x(&self) -> &[u64] { + &self.sizes_x + } + + pub fn sizes_y(&self) -> &[u64] { + &self.sizes_y + } + + pub fn bound(&self) -> u64 { + self.bound + } + + pub fn num_groups(&self) -> usize { + self.sizes_w.len() + } +} + +#[derive(Deserialize)] +struct Numerical3DimensionalMatchingData { + sizes_w: Vec, + sizes_x: Vec, + sizes_y: Vec, + bound: u64, +} + +impl<'de> Deserialize<'de> for Numerical3DimensionalMatching { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let data = Numerical3DimensionalMatchingData::deserialize(deserializer)?; + Self::try_new(data.sizes_w, data.sizes_x, data.sizes_y, data.bound) + .map_err(D::Error::custom) + } +} + +impl Problem for Numerical3DimensionalMatching { + const NAME: &'static str = "Numerical3DimensionalMatching"; + type Value = Or; + + fn variant() -> Vec<(&'static str, &'static str)> { + crate::variant_params![] + } + + fn dims(&self) -> Vec { + vec![self.num_groups(); 2 * self.num_groups()] + } + + fn evaluate(&self, config: &[usize]) -> Or { + Or({ + let m = self.num_groups(); + if config.len() != 2 * m { + return Or(false); + } + + // First m values: assignment of X-elements to W-elements (must be a permutation) + let x_perm = &config[..m]; + // Second m values: assignment of Y-elements to W-elements (must be a permutation) + let y_perm = &config[m..]; + + // Check that both are valid permutations of 0..m + let mut x_used = vec![false; m]; + let mut y_used = vec![false; m]; + + for i in 0..m { + if x_perm[i] >= m || y_perm[i] >= m { + return Or(false); + } + if x_used[x_perm[i]] || y_used[y_perm[i]] { + return Or(false); + } + x_used[x_perm[i]] = true; + y_used[y_perm[i]] = true; + } + + // Check that each triple sums to B + let target = u128::from(self.bound); + (0..m).all(|i| { + let sum = u128::from(self.sizes_w[i]) + + u128::from(self.sizes_x[x_perm[i]]) + + u128::from(self.sizes_y[y_perm[i]]); + sum == target + }) + }) + } +} + +crate::declare_variants! { + default Numerical3DimensionalMatching => "3^num_groups", +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_model_example_specs() -> Vec { + vec![crate::example_db::specs::ModelExampleSpec { + id: "numerical_3_dimensional_matching", + instance: Box::new(Numerical3DimensionalMatching::new( + vec![4, 5], + vec![4, 5], + vec![5, 7], + 15, + )), + optimal_config: vec![0, 1, 1, 0], + optimal_value: serde_json::json!(true), + }] +} + +#[cfg(test)] +#[path = "../../unit_tests/models/misc/numerical_3_dimensional_matching.rs"] +mod tests; diff --git a/src/models/misc/subset_product.rs b/src/models/misc/subset_product.rs new file mode 100644 index 00000000..75bc9c97 --- /dev/null +++ b/src/models/misc/subset_product.rs @@ -0,0 +1,221 @@ +//! Subset Product problem implementation. +//! +//! Given a set of positive integers and a target value, the problem asks whether +//! any subset's product equals exactly the target. A multiplicative analogue of +//! Subset Sum; NP-complete (see e.g. Garey & Johnson, 1979). +//! +//! This implementation uses arbitrary-precision integers (`BigUint`) so +//! reductions can construct large instances without fixed-width overflow. + +use crate::registry::{FieldInfo, ProblemSchemaEntry}; +use crate::traits::Problem; +use num_bigint::{BigUint, ToBigUint}; +use num_traits::{One, Zero}; +use serde::{Deserialize, Serialize}; + +inventory::submit! { + ProblemSchemaEntry { + name: "SubsetProduct", + display_name: "Subset Product", + aliases: &[], + dimensions: &[], + module_path: module_path!(), + description: "Find a subset of positive integers whose product equals exactly a target value", + fields: &[ + FieldInfo { name: "sizes", type_name: "Vec", description: "Positive integer sizes s(a) for each element" }, + FieldInfo { name: "target", type_name: "BigUint", description: "Target product B" }, + ], + } +} + +/// The Subset Product problem. +/// +/// Given a set of `n` positive integers and a target `B`, determine whether +/// there exists a subset whose elements multiply to exactly `B`. +/// +/// # Representation +/// +/// Each element has a binary variable: `x_i = 1` if element `i` is selected, +/// `0` otherwise. The problem is satisfiable iff `∏_{i: x_i=1} sizes[i] == target`. +/// +/// # Example +/// +/// ``` +/// use problemreductions::models::misc::SubsetProduct; +/// use problemreductions::{Problem, Solver, BruteForce}; +/// +/// let problem = SubsetProduct::new(vec![2u32, 3, 5, 7, 6, 10], 210u32); +/// let solver = BruteForce::new(); +/// let solution = solver.find_witness(&problem); +/// assert!(solution.is_some()); +/// ``` +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct SubsetProduct { + #[serde(with = "decimal_biguint_vec")] + sizes: Vec, + #[serde(with = "decimal_biguint")] + target: BigUint, +} + +impl SubsetProduct { + /// Create a new SubsetProduct instance. + /// + /// # Panics + /// + /// Panics if any size is not positive (must be > 0) or if target is zero. + pub fn new(sizes: Vec, target: T) -> Self + where + S: ToBigUint, + T: ToBigUint, + { + let sizes: Vec = sizes + .into_iter() + .map(|s| s.to_biguint().expect("All sizes must be positive (> 0)")) + .collect(); + assert!( + sizes.iter().all(|s| !s.is_zero()), + "All sizes must be positive (> 0)" + ); + let target = target + .to_biguint() + .expect("SubsetProduct target must be nonnegative"); + assert!(!target.is_zero(), "SubsetProduct target must be positive"); + Self { sizes, target } + } + + /// Create a new SubsetProduct instance without validating sizes. + /// + /// This is intended for reductions that produce SubsetProduct instances + /// where positivity is guaranteed by construction. + #[allow(dead_code)] + pub(crate) fn new_unchecked(sizes: Vec, target: BigUint) -> Self { + Self { sizes, target } + } + + /// Returns the element sizes. + pub fn sizes(&self) -> &[BigUint] { + &self.sizes + } + + /// Returns the target product. + pub fn target(&self) -> &BigUint { + &self.target + } + + /// Returns the number of elements. + pub fn num_elements(&self) -> usize { + self.sizes.len() + } +} + +impl Problem for SubsetProduct { + const NAME: &'static str = "SubsetProduct"; + type Value = crate::types::Or; + + fn variant() -> Vec<(&'static str, &'static str)> { + crate::variant_params![] + } + + fn dims(&self) -> Vec { + vec![2; self.num_elements()] + } + + fn evaluate(&self, config: &[usize]) -> crate::types::Or { + crate::types::Or({ + if config.len() != self.num_elements() { + return crate::types::Or(false); + } + if config.iter().any(|&v| v >= 2) { + return crate::types::Or(false); + } + let mut product = BigUint::one(); + for (i, &x) in config.iter().enumerate() { + if x == 1 { + product *= &self.sizes[i]; + } + } + product == self.target + }) + } +} + +crate::declare_variants! { + default SubsetProduct => "2^(num_elements / 2)", +} + +mod decimal_biguint { + use super::BigUint; + use serde::de::Error; + use serde::{Deserialize, Deserializer, Serializer}; + + #[derive(Deserialize)] + #[serde(untagged)] + pub(super) enum Repr { + String(String), + U64(u64), + I64(i64), + } + + pub(super) fn parse_repr(value: Repr) -> Result { + match value { + Repr::String(s) => BigUint::parse_bytes(s.as_bytes(), 10) + .ok_or_else(|| E::custom(format!("invalid decimal integer: {s}"))), + Repr::U64(n) => Ok(BigUint::from(n)), + Repr::I64(n) if n >= 0 => Ok(BigUint::from(n as u64)), + Repr::I64(n) => Err(E::custom(format!("expected nonnegative integer, got {n}"))), + } + } + + pub fn serialize(value: &BigUint, serializer: S) -> Result + where + S: Serializer, + { + serializer.serialize_str(&value.to_str_radix(10)) + } + + pub fn deserialize<'de, D>(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + parse_repr(Repr::deserialize(deserializer)?) + } +} + +mod decimal_biguint_vec { + use super::BigUint; + use serde::{Deserialize, Deserializer, Serialize, Serializer}; + + pub fn serialize(values: &[BigUint], serializer: S) -> Result + where + S: Serializer, + { + let strings: Vec = values.iter().map(ToString::to_string).collect(); + strings.serialize(serializer) + } + + pub fn deserialize<'de, D>(deserializer: D) -> Result, D::Error> + where + D: Deserializer<'de>, + { + let values = Vec::::deserialize(deserializer)?; + values + .into_iter() + .map(super::decimal_biguint::parse_repr::) + .collect() + } +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_model_example_specs() -> Vec { + // 6 elements [2,3,5,7,6,10], target 210 → select {2,3,5,7} + vec![crate::example_db::specs::ModelExampleSpec { + id: "subset_product", + instance: Box::new(SubsetProduct::new(vec![2u32, 3, 5, 7, 6, 10], 210u32)), + optimal_config: vec![1, 1, 1, 1, 0, 0], + optimal_value: serde_json::json!(true), + }] +} + +#[cfg(test)] +#[path = "../../unit_tests/models/misc/subset_product.rs"] +mod tests; diff --git a/src/models/mod.rs b/src/models/mod.rs index 7e049f8d..7c1cadf5 100644 --- a/src/models/mod.rs +++ b/src/models/mod.rs @@ -10,28 +10,33 @@ pub mod set; // Re-export commonly used types pub use algebraic::{ - ClosestVectorProblem, ConsecutiveBlockMinimization, ConsecutiveOnesMatrixAugmentation, - ConsecutiveOnesSubmatrix, EquilibriumPoint, FeasibleBasisExtension, QuadraticAssignment, - QuadraticDiophantineEquations, SimultaneousIncongruences, SparseMatrixCompression, BMF, ILP, - QUBO, + AlgebraicEquationsOverGF2, ClosestVectorProblem, ConsecutiveBlockMinimization, + ConsecutiveOnesMatrixAugmentation, ConsecutiveOnesSubmatrix, EquilibriumPoint, + FeasibleBasisExtension, MinimumMatrixDomination, MinimumWeightSolutionToLinearEquations, + QuadraticAssignment, QuadraticDiophantineEquations, SimultaneousIncongruences, + SparseMatrixCompression, BMF, ILP, QUBO, }; pub use formula::{ - CNFClause, CircuitSAT, KSatisfiability, NAESatisfiability, QuantifiedBooleanFormulas, - Quantifier, Satisfiability, + CNFClause, CircuitSAT, KSatisfiability, NAESatisfiability, NonTautology, + OneInThreeSatisfiability, Planar3Satisfiability, QuantifiedBooleanFormulas, Quantifier, + Satisfiability, }; pub use graph::{ AcyclicPartition, BalancedCompleteBipartiteSubgraph, BicliqueCover, BiconnectivityAugmentation, - BottleneckTravelingSalesman, BoundedComponentSpanningForest, DirectedHamiltonianPath, - DirectedTwoCommodityIntegralFlow, DisjointConnectingPaths, GeneralizedHex, HamiltonianCircuit, - HamiltonianPath, HamiltonianPathBetweenTwoVertices, IntegralFlowBundles, - IntegralFlowHomologousArcs, IntegralFlowWithMultipliers, IsomorphicSpanningTree, KClique, - KColoring, KthBestSpanningTree, LengthBoundedDisjointPaths, LongestCircuit, LongestPath, - MaxCut, MaximalIS, MaximumClique, MaximumIndependentSet, MaximumMatching, MinMaxMulticenter, - MinimumCutIntoBoundedSets, MinimumDominatingSet, MinimumDummyActivitiesPert, - MinimumFeedbackArcSet, MinimumFeedbackVertexSet, MinimumMaximalMatching, MinimumMultiwayCut, - MinimumSumMulticenter, MinimumVertexCover, MixedChinesePostman, MultipleChoiceBranching, - MultipleCopyFileAllocation, OptimalLinearArrangement, PartialFeedbackEdgeSet, - PartitionIntoForests, PartitionIntoPathsOfLength2, PartitionIntoTriangles, + BottleneckTravelingSalesman, BoundedComponentSpanningForest, BoundedDiameterSpanningTree, + DegreeConstrainedSpanningTree, DirectedHamiltonianPath, DirectedTwoCommodityIntegralFlow, + DisjointConnectingPaths, GeneralizedHex, HamiltonianCircuit, HamiltonianPath, + HamiltonianPathBetweenTwoVertices, IntegralFlowBundles, IntegralFlowHomologousArcs, + IntegralFlowWithMultipliers, IsomorphicSpanningTree, KClique, KColoring, Kernel, + KthBestSpanningTree, LengthBoundedDisjointPaths, LongestCircuit, LongestPath, MaxCut, + MaximalIS, MaximumAchromaticNumber, MaximumClique, MaximumIndependentSet, MaximumMatching, + MinMaxMulticenter, MinimumCoveringByCliques, MinimumCutIntoBoundedSets, MinimumDominatingSet, + MinimumDummyActivitiesPert, MinimumFeedbackArcSet, MinimumFeedbackVertexSet, + MinimumGeometricConnectedDominatingSet, MinimumIntersectionGraphBasis, MinimumMaximalMatching, + MinimumMultiwayCut, MinimumSumMulticenter, MinimumVertexCover, MixedChinesePostman, + MonochromaticTriangle, MultipleChoiceBranching, MultipleCopyFileAllocation, + OptimalLinearArrangement, PartialFeedbackEdgeSet, PartitionIntoCliques, PartitionIntoForests, + PartitionIntoPathsOfLength2, PartitionIntoPerfectMatchings, PartitionIntoTriangles, PathConstrainedNetworkFlow, RootedTreeArrangement, RuralPostman, ShortestWeightConstrainedPath, SpinGlass, SteinerTree, SteinerTreeInGraphs, StrongConnectivityAugmentation, SubgraphIsomorphism, TravelingSalesman, UndirectedFlowLowerBounds, @@ -39,23 +44,26 @@ pub use graph::{ }; pub use misc::PartiallyOrderedKnapsack; pub use misc::{ - AdditionalKey, BinPacking, CapacityAssignment, CbqRelation, ConjunctiveBooleanQuery, - ConjunctiveQueryFoldability, ConsistencyOfDatabaseFrequencyTables, CosineProductIntegration, - EnsembleComputation, ExpectedRetrievalCost, Factoring, FlowShopScheduling, GroupingBySwapping, - IntExpr, IntegerExpressionMembership, JobShopScheduling, Knapsack, KthLargestMTuple, - LongestCommonSubsequence, MinimumExternalMacroDataCompression, + AdditionalKey, Betweenness, BinPacking, CapacityAssignment, CbqRelation, + ConjunctiveBooleanQuery, ConjunctiveQueryFoldability, ConsistencyOfDatabaseFrequencyTables, + CosineProductIntegration, CyclicOrdering, DynamicStorageAllocation, EnsembleComputation, + ExpectedRetrievalCost, Factoring, FeasibleRegisterAssignment, FlowShopScheduling, + GroupingBySwapping, IntExpr, IntegerExpressionMembership, JobShopScheduling, Knapsack, + KthLargestMTuple, LongestCommonSubsequence, MinimumExternalMacroDataCompression, MinimumInternalMacroDataCompression, MinimumTardinessSequencing, MultiprocessorScheduling, - PaintShop, Partition, PrecedenceConstrainedScheduling, ProductionPlanning, QueryArg, - RectilinearPictureCompression, RegisterSufficiency, ResourceConstrainedScheduling, - SchedulingToMinimizeWeightedCompletionTime, SchedulingWithIndividualDeadlines, - SequencingToMinimizeMaximumCumulativeCost, SequencingToMinimizeTardyTaskWeight, - SequencingToMinimizeWeightedCompletionTime, SequencingToMinimizeWeightedTardiness, - SequencingWithReleaseTimesAndDeadlines, SequencingWithinIntervals, ShortestCommonSupersequence, - StackerCrane, StaffScheduling, StringToStringCorrection, SubsetSum, SumOfSquaresPartition, - Term, ThreePartition, TimetableDesign, + NonLivenessFreePetriNet, Numerical3DimensionalMatching, PaintShop, Partition, + PrecedenceConstrainedScheduling, ProductionPlanning, QueryArg, RectilinearPictureCompression, + RegisterSufficiency, ResourceConstrainedScheduling, SchedulingToMinimizeWeightedCompletionTime, + SchedulingWithIndividualDeadlines, SequencingToMinimizeMaximumCumulativeCost, + SequencingToMinimizeTardyTaskWeight, SequencingToMinimizeWeightedCompletionTime, + SequencingToMinimizeWeightedTardiness, SequencingWithReleaseTimesAndDeadlines, + SequencingWithinIntervals, ShortestCommonSupersequence, StackerCrane, StaffScheduling, + StringToStringCorrection, SubsetProduct, SubsetSum, SumOfSquaresPartition, Term, + ThreePartition, TimetableDesign, }; pub use set::{ ComparativeContainment, ConsecutiveSets, ExactCoverBy3Sets, IntegerKnapsack, MaximumSetPacking, MinimumCardinalityKey, MinimumHittingSet, MinimumSetCovering, PrimeAttributeName, - RootedTreeStorageAssignment, SetBasis, SetSplitting, TwoDimensionalConsecutiveSets, + RootedTreeStorageAssignment, SetBasis, SetSplitting, ThreeDimensionalMatching, + TwoDimensionalConsecutiveSets, }; diff --git a/src/models/set/mod.rs b/src/models/set/mod.rs index 785fde23..dd8f5115 100644 --- a/src/models/set/mod.rs +++ b/src/models/set/mod.rs @@ -10,7 +10,11 @@ //! - [`MinimumSetCovering`]: Minimum weight set cover //! - [`PrimeAttributeName`]: Determine if an attribute belongs to any candidate key //! - [`RootedTreeStorageAssignment`]: Extend subsets to directed tree paths within a total-cost bound +//! - [`SetBasis`]: Minimum-cardinality basis generating all sets by union //! - [`SetSplitting`]: 2-color universe so every specified subset is non-monochromatic +//! - [`ThreeDimensionalMatching`]: Perfect matching in a tripartite 3-uniform hypergraph +//! - [`TwoDimensionalConsecutiveSets`]: 2D consecutive arrangement of subset elements +//! - [`MinimumCardinalityKey`]: Smallest attribute set that uniquely identifies tuples pub(crate) mod comparative_containment; pub(crate) mod consecutive_sets; @@ -24,6 +28,7 @@ pub(crate) mod prime_attribute_name; pub(crate) mod rooted_tree_storage_assignment; pub(crate) mod set_basis; pub(crate) mod set_splitting; +pub(crate) mod three_dimensional_matching; pub(crate) mod two_dimensional_consecutive_sets; pub use comparative_containment::ComparativeContainment; @@ -38,6 +43,7 @@ pub use prime_attribute_name::PrimeAttributeName; pub use rooted_tree_storage_assignment::RootedTreeStorageAssignment; pub use set_basis::SetBasis; pub use set_splitting::SetSplitting; +pub use three_dimensional_matching::ThreeDimensionalMatching; pub use two_dimensional_consecutive_sets::TwoDimensionalConsecutiveSets; #[cfg(feature = "example-db")] @@ -55,6 +61,7 @@ pub(crate) fn canonical_model_example_specs() -> Vec", description: "Set M of triples (w, x, y)" }, + ], + } +} + +/// Three-Dimensional Matching (3DM) problem. +/// +/// Given disjoint sets W = {0, ..., q-1}, X = {0, ..., q-1}, Y = {0, ..., q-1} +/// and a set M of triples (w, x, y) where w is in W, x is in X, y is in Y, +/// determine if there exists a subset M' of M with |M'| = q such that no two +/// triples in M' agree in any coordinate. +/// +/// This is a classical NP-complete problem (Karp, 1972), closely related to +/// Exact Cover by 3-Sets. +/// +/// # Example +/// +/// ``` +/// use problemreductions::models::set::ThreeDimensionalMatching; +/// use problemreductions::{Problem, Solver, BruteForce}; +/// +/// // W = X = Y = {0, 1, 2} (q = 3) +/// // Triples: (0,1,2), (1,0,1), (2,2,0), (0,0,0), (1,2,2) +/// let problem = ThreeDimensionalMatching::new( +/// 3, +/// vec![(0, 1, 2), (1, 0, 1), (2, 2, 0), (0, 0, 0), (1, 2, 2)], +/// ); +/// +/// let solver = BruteForce::new(); +/// let solutions = solver.find_all_witnesses(&problem); +/// +/// // First three triples form a valid matching +/// assert!(!solutions.is_empty()); +/// assert!(problem.evaluate(&solutions[0])); +/// ``` +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ThreeDimensionalMatching { + /// Size of each set W, X, Y (elements are 0..universe_size). + universe_size: usize, + /// Set M of triples (w, x, y) where w, x, y are in 0..universe_size. + triples: Vec<(usize, usize, usize)>, +} + +impl ThreeDimensionalMatching { + /// Create a new 3DM problem. + /// + /// # Panics + /// + /// Panics if any triple contains an element outside 0..universe_size. + pub fn new(universe_size: usize, triples: Vec<(usize, usize, usize)>) -> Self { + for (i, &(w, x, y)) in triples.iter().enumerate() { + assert!( + w < universe_size, + "Triple {} has w-coordinate {} which is outside 0..{}", + i, + w, + universe_size + ); + assert!( + x < universe_size, + "Triple {} has x-coordinate {} which is outside 0..{}", + i, + x, + universe_size + ); + assert!( + y < universe_size, + "Triple {} has y-coordinate {} which is outside 0..{}", + i, + y, + universe_size + ); + } + Self { + universe_size, + triples, + } + } + + /// Get the universe size (q). + pub fn universe_size(&self) -> usize { + self.universe_size + } + + /// Get the number of triples in M. + pub fn num_triples(&self) -> usize { + self.triples.len() + } + + /// Get the triples. + pub fn triples(&self) -> &[(usize, usize, usize)] { + &self.triples + } + + /// Get a specific triple. + pub fn get_triple(&self, index: usize) -> Option<&(usize, usize, usize)> { + self.triples.get(index) + } + + /// Check if a configuration is a valid three-dimensional matching. + /// + /// A valid matching selects exactly q triples where all W-coordinates + /// are distinct, all X-coordinates are distinct, and all Y-coordinates + /// are distinct. + pub fn is_valid_solution(&self, config: &[usize]) -> bool { + self.evaluate(config).0 + } +} + +impl Problem for ThreeDimensionalMatching { + const NAME: &'static str = "ThreeDimensionalMatching"; + type Value = crate::types::Or; + + fn dims(&self) -> Vec { + vec![2; self.triples.len()] + } + + fn evaluate(&self, config: &[usize]) -> crate::types::Or { + crate::types::Or({ + if config.len() != self.triples.len() || config.iter().any(|&value| value > 1) { + return crate::types::Or(false); + } + + // Count selected triples + let selected_count: usize = config.iter().filter(|&&v| v == 1).sum(); + if selected_count != self.universe_size { + return crate::types::Or(false); + } + + // Check that selected triples have all distinct coordinates + let mut used_w = HashSet::with_capacity(self.universe_size); + let mut used_x = HashSet::with_capacity(self.universe_size); + let mut used_y = HashSet::with_capacity(self.universe_size); + + for (i, &selected) in config.iter().enumerate() { + if selected == 1 { + let (w, x, y) = self.triples[i]; + if !used_w.insert(w) { + return crate::types::Or(false); + } + if !used_x.insert(x) { + return crate::types::Or(false); + } + if !used_y.insert(y) { + return crate::types::Or(false); + } + } + } + + true + }) + } + + fn variant() -> Vec<(&'static str, &'static str)> { + crate::variant_params![] + } +} + +crate::declare_variants! { + default ThreeDimensionalMatching => "2^num_triples", +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_model_example_specs() -> Vec { + vec![crate::example_db::specs::ModelExampleSpec { + id: "three_dimensional_matching", + instance: Box::new(ThreeDimensionalMatching::new( + 3, + vec![(0, 1, 2), (1, 0, 1), (2, 2, 0), (0, 0, 0), (1, 2, 2)], + )), + optimal_config: vec![1, 1, 1, 0, 0], + optimal_value: serde_json::json!(true), + }] +} + +#[cfg(test)] +#[path = "../../unit_tests/models/set/three_dimensional_matching.rs"] +mod tests; diff --git a/src/unit_tests/models/algebraic/algebraic_equations_over_gf2.rs b/src/unit_tests/models/algebraic/algebraic_equations_over_gf2.rs new file mode 100644 index 00000000..1e1750e1 --- /dev/null +++ b/src/unit_tests/models/algebraic/algebraic_equations_over_gf2.rs @@ -0,0 +1,191 @@ +use crate::models::algebraic::AlgebraicEquationsOverGF2; +use crate::solvers::BruteForce; +use crate::traits::Problem; +use crate::types::Or; + +/// n=3, equations: +/// eq0: x0*x1 + x2 = 0 +/// eq1: x1*x2 + x0 + 1 = 0 +/// eq2: x0 + x1 + x2 + 1 = 0 +/// Solution: (1,0,0) +fn satisfiable_problem() -> AlgebraicEquationsOverGF2 { + AlgebraicEquationsOverGF2::new( + 3, + vec![ + vec![vec![0, 1], vec![2]], + vec![vec![1, 2], vec![0], vec![]], + vec![vec![0], vec![1], vec![2], vec![]], + ], + ) + .unwrap() +} + +/// n=2, equations: +/// eq0: x0 + x1 = 0 (x0 XOR x1 = 0, so x0 = x1) +/// eq1: x0 + x1 + 1 = 0 (x0 XOR x1 XOR 1 = 0, so x0 != x1) +/// No solution — contradictory. +fn unsatisfiable_problem() -> AlgebraicEquationsOverGF2 { + AlgebraicEquationsOverGF2::new( + 2, + vec![vec![vec![0], vec![1]], vec![vec![0], vec![1], vec![]]], + ) + .unwrap() +} + +#[test] +fn test_algebraic_equations_over_gf2_creation_and_accessors() { + let p = satisfiable_problem(); + assert_eq!(p.num_variables(), 3); + assert_eq!(p.num_equations(), 3); + assert_eq!(p.equations().len(), 3); + assert_eq!(p.dims(), vec![2, 2, 2]); + assert_eq!(p.num_variables(), 3); + assert_eq!( + ::NAME, + "AlgebraicEquationsOverGF2" + ); + assert_eq!(::variant(), vec![]); +} + +#[test] +fn test_algebraic_equations_over_gf2_evaluate_satisfiable() { + let p = satisfiable_problem(); + // config [1,0,0]: + // eq0: 1*0 + 0 = 0 ✓ + // eq1: 0*0 + 1 + 1 = 0 ✓ + // eq2: 1 + 0 + 0 + 1 = 0 ✓ + assert_eq!(p.evaluate(&[1, 0, 0]), Or(true)); + + // config [0,0,0]: + // eq0: 0*0 + 0 = 0 ✓ + // eq1: 0*0 + 0 + 1 = 1 ✗ + assert_eq!(p.evaluate(&[0, 0, 0]), Or(false)); + + // config [1,1,1]: + // eq0: 1*1 + 1 = 0 ✓ + // eq1: 1*1 + 1 + 1 = 1 ✗ + assert_eq!(p.evaluate(&[1, 1, 1]), Or(false)); +} + +#[test] +fn test_algebraic_equations_over_gf2_evaluate_unsatisfiable() { + let p = unsatisfiable_problem(); + assert_eq!(p.dims(), vec![2, 2]); + // All 4 assignments should fail + assert_eq!(p.evaluate(&[0, 0]), Or(false)); // eq0: 0+0=0 ✓, eq1: 0+0+1=1 ✗ + assert_eq!(p.evaluate(&[0, 1]), Or(false)); // eq0: 0+1=1 ✗ + assert_eq!(p.evaluate(&[1, 0]), Or(false)); // eq0: 1+0=1 ✗ + assert_eq!(p.evaluate(&[1, 1]), Or(false)); // eq0: 1+1=0 ✓, eq1: 1+1+1=1 ✗ +} + +#[test] +fn test_algebraic_equations_over_gf2_constant_monomial() { + // Single equation: 1 = 0 (always false) + let p = AlgebraicEquationsOverGF2::new(1, vec![vec![vec![]]]).unwrap(); + assert_eq!(p.evaluate(&[0]), Or(false)); + assert_eq!(p.evaluate(&[1]), Or(false)); + + // Single equation: 1 + 1 = 0 (always true — two constants XOR to 0) + let p2 = AlgebraicEquationsOverGF2::new(1, vec![vec![vec![], vec![]]]).unwrap(); + assert_eq!(p2.evaluate(&[0]), Or(true)); + assert_eq!(p2.evaluate(&[1]), Or(true)); +} + +#[test] +fn test_algebraic_equations_over_gf2_empty_equations() { + // No equations: trivially satisfied + let p = AlgebraicEquationsOverGF2::new(2, vec![]).unwrap(); + assert_eq!(p.evaluate(&[0, 0]), Or(true)); + assert_eq!(p.evaluate(&[1, 1]), Or(true)); +} + +#[test] +fn test_algebraic_equations_over_gf2_empty_polynomial() { + // One equation with no monomials: sum = 0, so satisfied + let p = AlgebraicEquationsOverGF2::new(2, vec![vec![]]).unwrap(); + assert_eq!(p.evaluate(&[0, 0]), Or(true)); +} + +#[test] +fn test_algebraic_equations_over_gf2_brute_force_finds_witness() { + let solver = BruteForce::new(); + let p = satisfiable_problem(); + let witness = solver.find_witness(&p).unwrap(); + assert_eq!(p.evaluate(&witness), Or(true)); +} + +#[test] +fn test_algebraic_equations_over_gf2_brute_force_no_witness() { + let solver = BruteForce::new(); + assert!(solver.find_witness(&unsatisfiable_problem()).is_none()); +} + +#[test] +fn test_algebraic_equations_over_gf2_brute_force_finds_all_witnesses() { + let solver = BruteForce::new(); + let p = satisfiable_problem(); + let all = solver.find_all_witnesses(&p); + assert!(!all.is_empty()); + assert!(all.iter().all(|sol| p.evaluate(sol) == Or(true))); +} + +#[test] +fn test_algebraic_equations_over_gf2_serialization() { + let p = satisfiable_problem(); + let json = serde_json::to_value(&p).unwrap(); + assert_eq!(json["num_variables"], 3); + assert_eq!(json["equations"].as_array().unwrap().len(), 3); + + let restored: AlgebraicEquationsOverGF2 = serde_json::from_value(json).unwrap(); + assert_eq!(restored.num_variables(), p.num_variables()); + assert_eq!(restored.num_equations(), p.num_equations()); + // Check round-trip preserves evaluation + assert_eq!(restored.evaluate(&[1, 0, 0]), Or(true)); +} + +#[test] +fn test_algebraic_equations_over_gf2_deserialization_rejects_invalid() { + // Variable index out of range + let r: Result = serde_json::from_value(serde_json::json!({ + "num_variables": 2, + "equations": [[[0, 5]]] + })); + assert!(r.is_err()); + + // Unsorted monomial + let r: Result = serde_json::from_value(serde_json::json!({ + "num_variables": 3, + "equations": [[[1, 0]]] + })); + assert!(r.is_err()); + + // Duplicate variable in monomial + let r: Result = serde_json::from_value(serde_json::json!({ + "num_variables": 3, + "equations": [[[1, 1]]] + })); + assert!(r.is_err()); +} + +#[test] +fn test_algebraic_equations_over_gf2_validation_errors() { + // Out of range + assert!(AlgebraicEquationsOverGF2::new(2, vec![vec![vec![3]]]).is_err()); + // Not sorted + assert!(AlgebraicEquationsOverGF2::new(3, vec![vec![vec![2, 1]]]).is_err()); + // Duplicate + assert!(AlgebraicEquationsOverGF2::new(3, vec![vec![vec![1, 1]]]).is_err()); + // Valid + assert!(AlgebraicEquationsOverGF2::new(3, vec![vec![vec![0, 1, 2]]]).is_ok()); +} + +#[test] +fn test_algebraic_equations_over_gf2_paper_example() { + // Canonical example from the issue: n=3, 3 equations, config [1,0,0] + let p = satisfiable_problem(); + assert_eq!(p.evaluate(&[1, 0, 0]), Or(true)); + + let solver = BruteForce::new(); + let witness = solver.find_witness(&p).unwrap(); + assert_eq!(p.evaluate(&witness), Or(true)); +} diff --git a/src/unit_tests/models/algebraic/minimum_matrix_domination.rs b/src/unit_tests/models/algebraic/minimum_matrix_domination.rs new file mode 100644 index 00000000..b5a968e9 --- /dev/null +++ b/src/unit_tests/models/algebraic/minimum_matrix_domination.rs @@ -0,0 +1,180 @@ +use super::*; +use crate::solvers::BruteForce; +use crate::traits::Problem; +use crate::types::Min; + +/// P6 adjacency matrix (6×6 symmetric, 10 ones). +fn p6_adjacency_matrix() -> Vec> { + vec![ + vec![false, true, false, false, false, false], + vec![true, false, true, false, false, false], + vec![false, true, false, true, false, false], + vec![false, false, true, false, true, false], + vec![false, false, false, true, false, true], + vec![false, false, false, false, true, false], + ] +} + +#[test] +fn test_minimum_matrix_domination_creation() { + let problem = MinimumMatrixDomination::new(p6_adjacency_matrix()); + assert_eq!(problem.num_rows(), 6); + assert_eq!(problem.num_cols(), 6); + assert_eq!(problem.num_ones(), 10); + assert_eq!(problem.dims(), vec![2; 10]); + assert_eq!( + ::NAME, + "MinimumMatrixDomination" + ); + assert_eq!(::variant(), vec![]); +} + +#[test] +fn test_minimum_matrix_domination_ones_enumeration() { + let problem = MinimumMatrixDomination::new(p6_adjacency_matrix()); + let expected_ones = vec![ + (0, 1), + (1, 0), + (1, 2), + (2, 1), + (2, 3), + (3, 2), + (3, 4), + (4, 3), + (4, 5), + (5, 4), + ]; + assert_eq!(problem.ones(), &expected_ones); +} + +#[test] +fn test_minimum_matrix_domination_evaluate_optimal() { + let problem = MinimumMatrixDomination::new(p6_adjacency_matrix()); + // Select entries 0,1,6,7: (0,1),(1,0),(3,4),(4,3) + // Covered rows: {0,1,3,4}, covered cols: {0,1,3,4} + // Unselected: (1,2) row 1 covered, (2,1) col 1 covered, (2,3) col 3 covered, + // (3,2) row 3 covered, (4,5) row 4 covered, (5,4) col 4 covered + let config = vec![1, 1, 0, 0, 0, 0, 1, 1, 0, 0]; + assert_eq!(problem.evaluate(&config), Min(Some(4))); +} + +#[test] +fn test_minimum_matrix_domination_evaluate_infeasible() { + let problem = MinimumMatrixDomination::new(p6_adjacency_matrix()); + // Select only entry 0: (0,1) — covers row 0, col 1 + // Entry (2,3) at index 4: row 2 not covered, col 3 not covered → infeasible + let config = vec![1, 0, 0, 0, 0, 0, 0, 0, 0, 0]; + assert_eq!(problem.evaluate(&config), Min(None)); +} + +#[test] +fn test_minimum_matrix_domination_evaluate_all_selected() { + let problem = MinimumMatrixDomination::new(p6_adjacency_matrix()); + let config = vec![1; 10]; + assert_eq!(problem.evaluate(&config), Min(Some(10))); +} + +#[test] +fn test_minimum_matrix_domination_evaluate_wrong_length() { + let problem = MinimumMatrixDomination::new(p6_adjacency_matrix()); + assert_eq!(problem.evaluate(&[1, 0]), Min(None)); + assert_eq!(problem.evaluate(&[1; 11]), Min(None)); +} + +#[test] +fn test_minimum_matrix_domination_evaluate_invalid_variable() { + let problem = MinimumMatrixDomination::new(p6_adjacency_matrix()); + let mut config = vec![0; 10]; + config[0] = 2; + assert_eq!(problem.evaluate(&config), Min(None)); +} + +#[test] +fn test_minimum_matrix_domination_brute_force() { + let problem = MinimumMatrixDomination::new(p6_adjacency_matrix()); + let solver = BruteForce::new(); + let witness = solver.find_witness(&problem).expect("should find optimal"); + let val = problem.evaluate(&witness); + assert_eq!(val, Min(Some(4))); +} + +#[test] +fn test_minimum_matrix_domination_identity_matrix() { + // 3×3 identity: 3 ones on diagonal, no shared rows/cols + // Every entry must be selected + let matrix = vec![ + vec![true, false, false], + vec![false, true, false], + vec![false, false, true], + ]; + let problem = MinimumMatrixDomination::new(matrix); + assert_eq!(problem.num_ones(), 3); + let solver = BruteForce::new(); + let witness = solver.find_witness(&problem).expect("should find optimal"); + assert_eq!(problem.evaluate(&witness), Min(Some(3))); + assert_eq!(witness, vec![1, 1, 1]); +} + +#[test] +fn test_minimum_matrix_domination_single_row() { + // One row with multiple ones: selecting any one dominates all others + let matrix = vec![vec![true, true, true]]; + let problem = MinimumMatrixDomination::new(matrix); + assert_eq!(problem.num_ones(), 3); + let solver = BruteForce::new(); + let witness = solver.find_witness(&problem).expect("should find optimal"); + assert_eq!(problem.evaluate(&witness), Min(Some(1))); +} + +#[test] +fn test_minimum_matrix_domination_empty_matrix() { + let problem = MinimumMatrixDomination::new(vec![]); + assert_eq!(problem.num_ones(), 0); + assert_eq!(problem.dims(), Vec::::new()); + // Empty config: vacuously valid with 0 selected + assert_eq!(problem.evaluate(&[]), Min(Some(0))); +} + +#[test] +fn test_minimum_matrix_domination_no_ones() { + let matrix = vec![vec![false, false], vec![false, false]]; + let problem = MinimumMatrixDomination::new(matrix); + assert_eq!(problem.num_ones(), 0); + assert_eq!(problem.evaluate(&[]), Min(Some(0))); +} + +#[test] +fn test_minimum_matrix_domination_serialization() { + let matrix = vec![vec![true, false], vec![false, true]]; + let problem = MinimumMatrixDomination::new(matrix); + let json = serde_json::to_value(&problem).unwrap(); + assert_eq!( + json, + serde_json::json!({ + "matrix": [[true, false], [false, true]], + "ones": [[0, 0], [1, 1]], + }) + ); + let restored: MinimumMatrixDomination = serde_json::from_value(json).unwrap(); + assert_eq!(restored.num_rows(), 2); + assert_eq!(restored.num_cols(), 2); + assert_eq!(restored.num_ones(), 2); +} + +#[test] +fn test_minimum_matrix_domination_complexity_metadata() { + use crate::registry::VariantEntry; + + let entry = inventory::iter::() + .find(|entry| entry.name == "MinimumMatrixDomination") + .expect("MinimumMatrixDomination variant entry should exist"); + + assert_eq!(entry.complexity, "2^num_ones"); +} + +#[test] +#[should_panic(expected = "same length")] +fn test_minimum_matrix_domination_inconsistent_rows() { + let matrix = vec![vec![true, false], vec![true]]; + MinimumMatrixDomination::new(matrix); +} diff --git a/src/unit_tests/models/algebraic/minimum_weight_solution_to_linear_equations.rs b/src/unit_tests/models/algebraic/minimum_weight_solution_to_linear_equations.rs new file mode 100644 index 00000000..4c6b2b30 --- /dev/null +++ b/src/unit_tests/models/algebraic/minimum_weight_solution_to_linear_equations.rs @@ -0,0 +1,139 @@ +use super::*; +use crate::solvers::BruteForce; +use crate::traits::Problem; +use crate::types::Min; + +/// A = [[1,2,3,1],[2,1,1,3]], b = [5,4] +fn example_instance() -> MinimumWeightSolutionToLinearEquations { + let matrix = vec![vec![1, 2, 3, 1], vec![2, 1, 1, 3]]; + let rhs = vec![5, 4]; + MinimumWeightSolutionToLinearEquations::new(matrix, rhs) +} + +#[test] +fn test_minimum_weight_solution_creation() { + let problem = example_instance(); + assert_eq!(problem.num_equations(), 2); + assert_eq!(problem.num_variables(), 4); + assert_eq!(problem.dims(), vec![2; 4]); + assert_eq!( + ::NAME, + "MinimumWeightSolutionToLinearEquations" + ); + assert_eq!( + ::variant(), + vec![] + ); +} + +#[test] +fn test_minimum_weight_solution_evaluate_consistent() { + let problem = example_instance(); + // Select columns 0,1: submatrix [[1,2],[2,1]], b=[5,4] → y=(1,2). Consistent. + let config = vec![1, 1, 0, 0]; + assert_eq!(problem.evaluate(&config), Min(Some(2))); +} + +#[test] +fn test_minimum_weight_solution_evaluate_inconsistent() { + let problem = example_instance(); + // Select only column 0: [1;2]y=[5;4] → y=5, but 2*5=10 ≠ 4. Inconsistent. + let config = vec![1, 0, 0, 0]; + assert_eq!(problem.evaluate(&config), Min(None)); +} + +#[test] +fn test_minimum_weight_solution_evaluate_all_selected() { + let problem = example_instance(); + // All 4 columns selected — system has solution, so feasible with value 4. + let config = vec![1, 1, 1, 1]; + assert_eq!(problem.evaluate(&config), Min(Some(4))); +} + +#[test] +fn test_minimum_weight_solution_evaluate_none_selected() { + let problem = example_instance(); + // No columns selected, b ≠ 0 → infeasible. + let config = vec![0, 0, 0, 0]; + assert_eq!(problem.evaluate(&config), Min(None)); +} + +#[test] +fn test_minimum_weight_solution_evaluate_wrong_length() { + let problem = example_instance(); + assert_eq!(problem.evaluate(&[1, 0]), Min(None)); + assert_eq!(problem.evaluate(&[1; 5]), Min(None)); +} + +#[test] +fn test_minimum_weight_solution_evaluate_invalid_variable() { + let problem = example_instance(); + let config = vec![2, 0, 0, 0]; + assert_eq!(problem.evaluate(&config), Min(None)); +} + +#[test] +fn test_minimum_weight_solution_brute_force() { + let problem = example_instance(); + let solver = BruteForce::new(); + let witness = solver.find_witness(&problem).expect("should find optimal"); + let val = problem.evaluate(&witness); + assert_eq!(val, Min(Some(2))); +} + +#[test] +fn test_minimum_weight_solution_zero_rhs() { + // A = [[1,1],[2,2]], b = [0,0] — trivially consistent with 0 columns. + let matrix = vec![vec![1, 1], vec![2, 2]]; + let rhs = vec![0, 0]; + let problem = MinimumWeightSolutionToLinearEquations::new(matrix, rhs); + let config = vec![0, 0]; + assert_eq!(problem.evaluate(&config), Min(Some(0))); +} + +#[test] +fn test_minimum_weight_solution_serialization() { + let problem = example_instance(); + let json = serde_json::to_value(&problem).unwrap(); + assert_eq!( + json, + serde_json::json!({ + "matrix": [[1, 2, 3, 1], [2, 1, 1, 3]], + "rhs": [5, 4], + }) + ); + let restored: MinimumWeightSolutionToLinearEquations = serde_json::from_value(json).unwrap(); + assert_eq!(restored.num_equations(), 2); + assert_eq!(restored.num_variables(), 4); +} + +#[test] +fn test_minimum_weight_solution_complexity_metadata() { + use crate::registry::VariantEntry; + + let entry = inventory::iter::() + .find(|entry| entry.name == "MinimumWeightSolutionToLinearEquations") + .expect("MinimumWeightSolutionToLinearEquations variant entry should exist"); + + assert_eq!(entry.complexity, "2^num_variables"); +} + +#[test] +#[should_panic(expected = "at least one row")] +fn test_minimum_weight_solution_empty_matrix() { + MinimumWeightSolutionToLinearEquations::new(vec![], vec![]); +} + +#[test] +#[should_panic(expected = "same length")] +fn test_minimum_weight_solution_inconsistent_rows() { + let matrix = vec![vec![1, 2], vec![3]]; + MinimumWeightSolutionToLinearEquations::new(matrix, vec![1, 2]); +} + +#[test] +#[should_panic(expected = "RHS length")] +fn test_minimum_weight_solution_rhs_mismatch() { + let matrix = vec![vec![1, 2], vec![3, 4]]; + MinimumWeightSolutionToLinearEquations::new(matrix, vec![1]); +} diff --git a/src/unit_tests/models/formula/non_tautology.rs b/src/unit_tests/models/formula/non_tautology.rs new file mode 100644 index 00000000..5a455af2 --- /dev/null +++ b/src/unit_tests/models/formula/non_tautology.rs @@ -0,0 +1,90 @@ +use super::*; +use crate::solvers::BruteForce; +use crate::traits::Problem; + +#[test] +fn test_non_tautology_creation() { + let problem = NonTautology::new(3, vec![vec![1, 2, 3], vec![-1, -2, -3]]); + assert_eq!(problem.num_vars(), 3); + assert_eq!(problem.num_disjuncts(), 2); + assert_eq!(problem.num_variables(), 3); + assert_eq!(problem.dims(), vec![2, 2, 2]); +} + +#[test] +fn test_non_tautology_evaluate() { + // (x1 AND x2 AND x3) OR (NOT x1 AND NOT x2 AND NOT x3) + let problem = NonTautology::new(3, vec![vec![1, 2, 3], vec![-1, -2, -3]]); + + // config [1,0,0] -> x1=T, x2=F, x3=F + // D1: x1=T, x2=F -> D1 false (x2 is false) + // D2: NOT x1=F -> D2 false (NOT x1 is false) + // All disjuncts false -> formula is false -> falsifying assignment exists + assert!(problem.evaluate(&[1, 0, 0])); + + // config [1,1,1] -> x1=T, x2=T, x3=T + // D1: all true -> D1 is true -> formula is true -> NOT a falsifying assignment + assert!(!problem.evaluate(&[1, 1, 1])); + + // config [0,0,0] -> x1=F, x2=F, x3=F + // D2: NOT x1=T, NOT x2=T, NOT x3=T -> D2 is true -> formula is true + assert!(!problem.evaluate(&[0, 0, 0])); +} + +#[test] +fn test_non_tautology_solver() { + // (x1 AND x2 AND x3) OR (NOT x1 AND NOT x2 AND NOT x3) + let problem = NonTautology::new(3, vec![vec![1, 2, 3], vec![-1, -2, -3]]); + + let solver = BruteForce::new(); + let solution = solver.find_witness(&problem); + assert!(solution.is_some()); + + // Verify the found solution actually falsifies the formula + let sol = solution.unwrap(); + assert!(problem.evaluate(&sol)); + + // Check all witnesses are valid + let all_solutions = solver.find_all_witnesses(&problem); + assert!(!all_solutions.is_empty()); + for sol in &all_solutions { + assert!(problem.evaluate(sol)); + } +} + +#[test] +fn test_non_tautology_tautological() { + // (x1) OR (NOT x1) is a tautology — no falsifying assignment exists + let problem = NonTautology::new(1, vec![vec![1], vec![-1]]); + + let solver = BruteForce::new(); + assert!(solver.find_witness(&problem).is_none()); +} + +#[test] +fn test_non_tautology_serialization() { + let problem = NonTautology::new(3, vec![vec![1, 2, 3], vec![-1, -2, -3]]); + let json = serde_json::to_string(&problem).unwrap(); + let deserialized: NonTautology = serde_json::from_str(&json).unwrap(); + assert_eq!(deserialized.num_vars(), 3); + assert_eq!(deserialized.num_disjuncts(), 2); +} + +#[test] +fn test_non_tautology_is_falsifying() { + let problem = NonTautology::new(3, vec![vec![1, 2], vec![-1, 3], vec![2, -3]]); + // x1=F, x2=F, x3=F: + // D1: x1=F -> false. D2: NOT x1=T, x3=F -> false. D3: x2=F -> false. + // All false -> falsifying + assert!(problem.is_falsifying(&[false, false, false])); + + // x1=T, x2=T, x3=F: + // D1: x1=T, x2=T -> true. Not falsifying. + assert!(!problem.is_falsifying(&[true, true, false])); +} + +#[test] +#[should_panic(expected = "outside range")] +fn test_non_tautology_variable_out_of_range() { + NonTautology::new(2, vec![vec![1, 3]]); +} diff --git a/src/unit_tests/models/formula/one_in_three_satisfiability.rs b/src/unit_tests/models/formula/one_in_three_satisfiability.rs new file mode 100644 index 00000000..e20220e2 --- /dev/null +++ b/src/unit_tests/models/formula/one_in_three_satisfiability.rs @@ -0,0 +1,124 @@ +use super::*; +use crate::solvers::BruteForce; +use crate::traits::Problem; + +#[test] +fn test_one_in_three_satisfiability_creation() { + let problem = OneInThreeSatisfiability::new( + 4, + vec![ + CNFClause::new(vec![1, 2, 3]), + CNFClause::new(vec![-1, 3, 4]), + CNFClause::new(vec![2, -3, -4]), + ], + ); + assert_eq!(problem.num_vars(), 4); + assert_eq!(problem.num_clauses(), 3); + assert_eq!(problem.num_variables(), 4); + assert_eq!(problem.dims(), vec![2, 2, 2, 2]); +} + +#[test] +fn test_one_in_three_satisfiability_evaluate() { + let problem = OneInThreeSatisfiability::new( + 4, + vec![ + CNFClause::new(vec![1, 2, 3]), + CNFClause::new(vec![-1, 3, 4]), + CNFClause::new(vec![2, -3, -4]), + ], + ); + + // config [1,0,0,1] -> x1=T, x2=F, x3=F, x4=T + // Clause 1: (T, F, F) -> exactly 1 true -> OK + // Clause 2: (F, F, T) -> exactly 1 true -> OK + // Clause 3: (F, T, F) -> exactly 1 true -> OK + assert!(problem.evaluate(&[1, 0, 0, 1])); + + // config [1,1,1,0] -> x1=T, x2=T, x3=T, x4=F + // Clause 1: (T, T, T) -> 3 true -> NOT 1-in-3 + assert!(!problem.evaluate(&[1, 1, 1, 0])); + + // config [0,0,0,0] -> all false + // Clause 1: (F, F, F) -> 0 true -> NOT 1-in-3 + assert!(!problem.evaluate(&[0, 0, 0, 0])); +} + +#[test] +fn test_one_in_three_satisfiability_solver() { + let problem = OneInThreeSatisfiability::new( + 4, + vec![ + CNFClause::new(vec![1, 2, 3]), + CNFClause::new(vec![-1, 3, 4]), + CNFClause::new(vec![2, -3, -4]), + ], + ); + + let solver = BruteForce::new(); + let solution = solver.find_witness(&problem); + assert!(solution.is_some()); + + // Verify the found solution actually satisfies 1-in-3 + let sol = solution.unwrap(); + assert!(problem.evaluate(&sol)); + + // Check all witnesses are valid + let all_solutions = solver.find_all_witnesses(&problem); + assert!(!all_solutions.is_empty()); + for sol in &all_solutions { + assert!(problem.evaluate(sol)); + } +} + +#[test] +fn test_one_in_three_satisfiability_unsatisfiable() { + // (x1 OR x1 OR x1) requires exactly 1 true among (x1, x1, x1) + // If x1=T, 3 true. If x1=F, 0 true. Neither is 1. + let problem = OneInThreeSatisfiability::new(1, vec![CNFClause::new(vec![1, 1, 1])]); + + let solver = BruteForce::new(); + assert!(solver.find_witness(&problem).is_none()); +} + +#[test] +fn test_one_in_three_satisfiability_serialization() { + let problem = OneInThreeSatisfiability::new( + 3, + vec![ + CNFClause::new(vec![1, 2, 3]), + CNFClause::new(vec![-1, -2, -3]), + ], + ); + let json = serde_json::to_string(&problem).unwrap(); + let deserialized: OneInThreeSatisfiability = serde_json::from_str(&json).unwrap(); + assert_eq!(deserialized.num_vars(), 3); + assert_eq!(deserialized.num_clauses(), 2); +} + +#[test] +fn test_one_in_three_satisfiability_is_one_in_three_satisfying() { + let problem = OneInThreeSatisfiability::new( + 3, + vec![ + CNFClause::new(vec![1, 2, 3]), + CNFClause::new(vec![-1, -2, 3]), + ], + ); + // x1=F, x2=F, x3=T -> clause 1: (F,F,T)=1 OK, clause 2: (T,T,T)=3 FAIL + assert!(!problem.is_one_in_three_satisfying(&[false, false, true])); + // x1=T, x2=F, x3=F -> clause 1: (T,F,F)=1 OK, clause 2: (F,T,F)=1 OK + assert!(problem.is_one_in_three_satisfying(&[true, false, false])); +} + +#[test] +#[should_panic(expected = "has 2 literals, expected 3")] +fn test_one_in_three_satisfiability_wrong_clause_width() { + OneInThreeSatisfiability::new(3, vec![CNFClause::new(vec![1, 2])]); +} + +#[test] +#[should_panic(expected = "outside range")] +fn test_one_in_three_satisfiability_variable_out_of_range() { + OneInThreeSatisfiability::new(2, vec![CNFClause::new(vec![1, 2, 3])]); +} diff --git a/src/unit_tests/models/formula/planar_3_satisfiability.rs b/src/unit_tests/models/formula/planar_3_satisfiability.rs new file mode 100644 index 00000000..ccbdffc8 --- /dev/null +++ b/src/unit_tests/models/formula/planar_3_satisfiability.rs @@ -0,0 +1,141 @@ +use super::*; +use crate::solvers::BruteForce; +use crate::traits::Problem; + +#[test] +fn test_planar_3_satisfiability_creation() { + let problem = Planar3Satisfiability::new( + 4, + vec![ + CNFClause::new(vec![1, 2, 3]), + CNFClause::new(vec![-1, 2, 4]), + CNFClause::new(vec![1, -3, 4]), + CNFClause::new(vec![-2, 3, -4]), + ], + ); + assert_eq!(problem.num_vars(), 4); + assert_eq!(problem.num_clauses(), 4); + assert_eq!(problem.num_variables(), 4); + assert_eq!(problem.dims(), vec![2, 2, 2, 2]); +} + +#[test] +fn test_planar_3_satisfiability_evaluate() { + let problem = Planar3Satisfiability::new( + 4, + vec![ + CNFClause::new(vec![1, 2, 3]), + CNFClause::new(vec![-1, 2, 4]), + CNFClause::new(vec![1, -3, 4]), + CNFClause::new(vec![-2, 3, -4]), + ], + ); + + // config [1,1,1,0] -> x1=T, x2=T, x3=T, x4=F + // (T OR T OR T)=T, (F OR T OR F)=T, (T OR F OR F)=T, (F OR T OR T)=T + assert!(problem.evaluate(&[1, 1, 1, 0])); + + // config [0,0,0,0] -> all false + // (F OR F OR F)=F -> unsatisfied + assert!(!problem.evaluate(&[0, 0, 0, 0])); +} + +#[test] +fn test_planar_3_satisfiability_solver() { + let problem = Planar3Satisfiability::new( + 4, + vec![ + CNFClause::new(vec![1, 2, 3]), + CNFClause::new(vec![-1, 2, 4]), + CNFClause::new(vec![1, -3, 4]), + CNFClause::new(vec![-2, 3, -4]), + ], + ); + + let solver = BruteForce::new(); + let solution = solver.find_witness(&problem); + assert!(solution.is_some()); + + // Verify the found solution actually satisfies the formula + let sol = solution.unwrap(); + assert!(problem.evaluate(&sol)); + + // Check all witnesses are valid + let all_solutions = solver.find_all_witnesses(&problem); + assert!(!all_solutions.is_empty()); + for sol in &all_solutions { + assert!(problem.evaluate(sol)); + } +} + +#[test] +fn test_planar_3_satisfiability_unsatisfiable() { + // Contradictory formula: (x1 OR x1 OR x1) AND (NOT x1 OR NOT x1 OR NOT x1) + // AND (x2 OR x2 OR x2) AND (NOT x2 OR NOT x2 OR NOT x2) + // This requires x1=T and x1=F simultaneously, same for x2 + let problem = Planar3Satisfiability::new( + 2, + vec![ + CNFClause::new(vec![1, 1, 1]), + CNFClause::new(vec![-1, -1, -1]), + CNFClause::new(vec![2, 2, 2]), + CNFClause::new(vec![-2, -2, -2]), + ], + ); + + let solver = BruteForce::new(); + assert!(solver.find_witness(&problem).is_none()); +} + +#[test] +fn test_planar_3_satisfiability_get_clause() { + let problem = Planar3Satisfiability::new( + 3, + vec![ + CNFClause::new(vec![1, 2, 3]), + CNFClause::new(vec![-1, -2, -3]), + ], + ); + assert_eq!(problem.get_clause(0), Some(&CNFClause::new(vec![1, 2, 3]))); + assert_eq!(problem.get_clause(2), None); +} + +#[test] +fn test_planar_3_satisfiability_is_satisfying() { + let problem = Planar3Satisfiability::new( + 3, + vec![ + CNFClause::new(vec![1, 2, 3]), + CNFClause::new(vec![-1, -2, 3]), + ], + ); + assert!(problem.is_satisfying(&[false, false, true])); // x3=T satisfies both + assert!(!problem.is_satisfying(&[false, false, false])); // all false fails clause 1 +} + +#[test] +fn test_planar_3_satisfiability_serialization() { + let problem = Planar3Satisfiability::new( + 3, + vec![ + CNFClause::new(vec![1, 2, 3]), + CNFClause::new(vec![-1, -2, -3]), + ], + ); + let json = serde_json::to_string(&problem).unwrap(); + let deserialized: Planar3Satisfiability = serde_json::from_str(&json).unwrap(); + assert_eq!(deserialized.num_vars(), 3); + assert_eq!(deserialized.num_clauses(), 2); +} + +#[test] +#[should_panic(expected = "has 2 literals, expected 3")] +fn test_planar_3_satisfiability_wrong_clause_width() { + Planar3Satisfiability::new(3, vec![CNFClause::new(vec![1, 2])]); +} + +#[test] +#[should_panic(expected = "outside range")] +fn test_planar_3_satisfiability_variable_out_of_range() { + Planar3Satisfiability::new(2, vec![CNFClause::new(vec![1, 2, 3])]); +} diff --git a/src/unit_tests/models/graph/bounded_diameter_spanning_tree.rs b/src/unit_tests/models/graph/bounded_diameter_spanning_tree.rs new file mode 100644 index 00000000..d4325e60 --- /dev/null +++ b/src/unit_tests/models/graph/bounded_diameter_spanning_tree.rs @@ -0,0 +1,134 @@ +use super::*; +use crate::solvers::BruteForce; +use crate::topology::SimpleGraph; +use crate::traits::Problem; + +fn example_instance() -> BoundedDiameterSpanningTree { + // 5 vertices, 7 edges with weights + // (0,1,1),(0,2,2),(0,3,1),(1,2,1),(1,4,2),(2,3,1),(3,4,1) + // B=5, D=3 + BoundedDiameterSpanningTree::new( + SimpleGraph::new( + 5, + vec![(0, 1), (0, 2), (0, 3), (1, 2), (1, 4), (2, 3), (3, 4)], + ), + vec![1, 2, 1, 1, 2, 1, 1], + 5, + 3, + ) +} + +#[test] +fn test_bounded_diameter_spanning_tree_creation() { + let problem = example_instance(); + assert_eq!(problem.num_vertices(), 5); + assert_eq!(problem.num_edges(), 7); + assert_eq!(problem.weight_bound(), &5); + assert_eq!(problem.diameter_bound(), 3); + assert_eq!(problem.dims(), vec![2; 7]); + assert_eq!(problem.graph().num_vertices(), 5); + assert_eq!(problem.edge_list().len(), 7); + assert_eq!(problem.edge_weights().len(), 7); + assert!(problem.is_weighted()); +} + +#[test] +fn test_bounded_diameter_spanning_tree_evaluate_valid() { + let problem = example_instance(); + // Edges: (0,1)=0, (0,2)=1, (0,3)=2, (1,2)=3, (1,4)=4, (2,3)=5, (3,4)=6 + // Select edges 0,2,5,6: (0,1),(0,3),(2,3),(3,4) + // Weight: 1+1+1+1 = 4 ≤ 5 + // Tree adjacency: 0-{1,3}, 1-{0}, 2-{3}, 3-{0,2,4}, 4-{3} + // Diameter: longest path is e.g. 1-0-3-2 or 1-0-3-4 = 3 edges ≤ 3 + assert!(problem.evaluate(&[1, 0, 1, 0, 0, 1, 1])); +} + +#[test] +fn test_bounded_diameter_spanning_tree_evaluate_exceeds_weight() { + let problem = example_instance(); + // Select edges 1,2,4,6: (0,2),(0,3),(1,4),(3,4) + // Weight: 2+1+2+1 = 6 > 5 + assert!(!problem.evaluate(&[0, 1, 1, 0, 1, 0, 1])); +} + +#[test] +fn test_bounded_diameter_spanning_tree_evaluate_exceeds_diameter() { + // Create instance with very tight diameter bound + let problem = BoundedDiameterSpanningTree::new( + SimpleGraph::new(4, vec![(0, 1), (1, 2), (2, 3)]), + vec![1, 1, 1], + 10, + 1, // diameter ≤ 1 means all vertices must be distance 1 from each other + ); + // The only spanning tree is the path 0-1-2-3 with diameter 3 + assert!(!problem.evaluate(&[1, 1, 1])); +} + +#[test] +fn test_bounded_diameter_spanning_tree_evaluate_not_tree() { + let problem = example_instance(); + // Too few edges + assert!(!problem.evaluate(&[1, 1, 0, 0, 0, 0, 0])); + // Too many edges + assert!(!problem.evaluate(&[1, 1, 1, 1, 1, 0, 0])); +} + +#[test] +fn test_bounded_diameter_spanning_tree_evaluate_wrong_length() { + let problem = example_instance(); + assert!(!problem.evaluate(&[0, 1, 0])); + assert!(!problem.evaluate(&[0, 1, 0, 0, 1, 0, 0, 1])); +} + +#[test] +fn test_bounded_diameter_spanning_tree_brute_force() { + let problem = example_instance(); + let solver = BruteForce::new(); + let solution = solver.find_witness(&problem); + assert!(solution.is_some()); + assert!(problem.evaluate(&solution.unwrap())); +} + +#[test] +fn test_bounded_diameter_spanning_tree_infeasible() { + // Path graph 0-1-2-3-4, all weight 1, weight bound 10 but diameter bound 2 + // Only spanning tree is the path itself with diameter 4 > 2 + let problem = BoundedDiameterSpanningTree::new( + SimpleGraph::new(5, vec![(0, 1), (1, 2), (2, 3), (3, 4)]), + vec![1, 1, 1, 1], + 10, + 2, + ); + let solver = BruteForce::new(); + assert!(solver.find_witness(&problem).is_none()); +} + +#[test] +fn test_bounded_diameter_spanning_tree_serialization() { + let problem = example_instance(); + let json = serde_json::to_string(&problem).unwrap(); + let deserialized: BoundedDiameterSpanningTree = + serde_json::from_str(&json).unwrap(); + assert_eq!(deserialized.num_vertices(), 5); + assert_eq!(deserialized.num_edges(), 7); + assert_eq!(deserialized.weight_bound(), &5); + assert_eq!(deserialized.diameter_bound(), 3); +} + +#[test] +#[should_panic(expected = "diameter_bound must be at least 1")] +fn test_bounded_diameter_spanning_tree_zero_diameter_panics() { + let _ = BoundedDiameterSpanningTree::new( + SimpleGraph::new(3, vec![(0, 1), (1, 2)]), + vec![1, 1], + 5, + 0, + ); +} + +#[test] +#[should_panic(expected = "edge_weights length must match num_edges")] +fn test_bounded_diameter_spanning_tree_wrong_weights_length_panics() { + let _ = + BoundedDiameterSpanningTree::new(SimpleGraph::new(3, vec![(0, 1), (1, 2)]), vec![1], 5, 2); +} diff --git a/src/unit_tests/models/graph/degree_constrained_spanning_tree.rs b/src/unit_tests/models/graph/degree_constrained_spanning_tree.rs new file mode 100644 index 00000000..e3dfaae7 --- /dev/null +++ b/src/unit_tests/models/graph/degree_constrained_spanning_tree.rs @@ -0,0 +1,128 @@ +use super::*; +use crate::solvers::BruteForce; +use crate::topology::SimpleGraph; +use crate::traits::Problem; + +fn example_instance() -> DegreeConstrainedSpanningTree { + // 5 vertices, 7 edges, K=2 + DegreeConstrainedSpanningTree::new( + SimpleGraph::new( + 5, + vec![(0, 1), (0, 2), (0, 3), (1, 2), (1, 4), (2, 3), (3, 4)], + ), + 2, + ) +} + +#[test] +fn test_degree_constrained_spanning_tree_creation() { + let problem = example_instance(); + assert_eq!(problem.num_vertices(), 5); + assert_eq!(problem.num_edges(), 7); + assert_eq!(problem.max_degree(), 2); + assert_eq!(problem.dims(), vec![2; 7]); + assert_eq!(problem.graph().num_vertices(), 5); + assert_eq!(problem.edge_list().len(), 7); +} + +#[test] +fn test_degree_constrained_spanning_tree_evaluate_valid() { + let problem = example_instance(); + // Edges: (0,1)=0, (0,2)=1, (0,3)=2, (1,2)=3, (1,4)=4, (2,3)=5, (3,4)=6 + // Select edges 1,2,3,4: (0,2),(0,3),(1,2),(1,4) + // Degrees: 0→2, 1→2, 2→2, 3→1, 4→1 — all ≤ 2 + // Connected and n-1=4 edges → valid spanning tree + assert!(problem.evaluate(&[0, 1, 1, 1, 1, 0, 0])); +} + +#[test] +fn test_degree_constrained_spanning_tree_evaluate_invalid_degree() { + let problem = example_instance(); + // Select edges 0,1,2,4: (0,1),(0,2),(0,3),(1,4) + // Degrees: 0→3 (exceeds K=2) + assert!(!problem.evaluate(&[1, 1, 1, 0, 1, 0, 0])); +} + +#[test] +fn test_degree_constrained_spanning_tree_evaluate_not_tree() { + let problem = example_instance(); + // Select only 3 edges (not enough for n-1=4) + assert!(!problem.evaluate(&[1, 1, 1, 0, 0, 0, 0])); + // Select 5 edges (too many) + assert!(!problem.evaluate(&[1, 1, 1, 1, 1, 0, 0])); +} + +#[test] +fn test_degree_constrained_spanning_tree_evaluate_disconnected() { + let problem = example_instance(); + // Select edges (0,1),(0,2),(0,3),(3,4) → 4 edges, but check degree: + // 0→3 edges — degree exceeds K=2, so fails on degree. + // Try: (0,1),(2,3),(1,4),(3,4) → indices 0,5,4,6 + // Degrees: 0→1, 1→2, 2→1, 3→3 — degree exceeds K=2 + // Need to pick 4 edges forming a tree where no vertex has degree > 2. + // edges (0,2),(2,3),(3,4),(1,4) → indices 1,5,6,4 + // Degrees: 0→1, 1→1, 2→2, 3→2, 4→2 → valid and connected! + assert!(problem.evaluate(&[0, 1, 0, 0, 1, 1, 1])); +} + +#[test] +fn test_degree_constrained_spanning_tree_evaluate_wrong_length() { + let problem = example_instance(); + assert!(!problem.evaluate(&[0, 1, 0])); + assert!(!problem.evaluate(&[0, 1, 0, 0, 1, 0, 0, 1])); +} + +#[test] +fn test_degree_constrained_spanning_tree_brute_force() { + let problem = example_instance(); + let solver = BruteForce::new(); + let solution = solver.find_witness(&problem); + assert!(solution.is_some()); + assert!(problem.evaluate(&solution.unwrap())); +} + +#[test] +fn test_degree_constrained_spanning_tree_infeasible() { + // Star graph K_{1,4}: vertex 0 connected to 1,2,3,4. + // Only spanning tree is the star itself, which has degree 4 at vertex 0. + // With K=2, no spanning tree exists. + let problem = DegreeConstrainedSpanningTree::new( + SimpleGraph::new(5, vec![(0, 1), (0, 2), (0, 3), (0, 4)]), + 2, + ); + let solver = BruteForce::new(); + assert!(solver.find_witness(&problem).is_none()); +} + +#[test] +fn test_degree_constrained_spanning_tree_k1_path() { + // K=1 means the tree is a single edge for n=2. + // For n>2, K=1 is impossible since a tree on n>=3 vertices must have max degree >= 2. + let problem = + DegreeConstrainedSpanningTree::new(SimpleGraph::new(3, vec![(0, 1), (1, 2), (0, 2)]), 1); + let solver = BruteForce::new(); + assert!(solver.find_witness(&problem).is_none()); + + // For n=2, K=1 works: the single edge is the tree. + let problem2 = DegreeConstrainedSpanningTree::new(SimpleGraph::new(2, vec![(0, 1)]), 1); + let solver2 = BruteForce::new(); + let sol = solver2.find_witness(&problem2); + assert!(sol.is_some()); +} + +#[test] +fn test_degree_constrained_spanning_tree_serialization() { + let problem = example_instance(); + let json = serde_json::to_string(&problem).unwrap(); + let deserialized: DegreeConstrainedSpanningTree = + serde_json::from_str(&json).unwrap(); + assert_eq!(deserialized.num_vertices(), 5); + assert_eq!(deserialized.num_edges(), 7); + assert_eq!(deserialized.max_degree(), 2); +} + +#[test] +#[should_panic(expected = "max_degree must be at least 1")] +fn test_degree_constrained_spanning_tree_zero_k_panics() { + let _ = DegreeConstrainedSpanningTree::new(SimpleGraph::new(3, vec![(0, 1), (1, 2)]), 0); +} diff --git a/src/unit_tests/models/graph/kernel.rs b/src/unit_tests/models/graph/kernel.rs new file mode 100644 index 00000000..270b0fbf --- /dev/null +++ b/src/unit_tests/models/graph/kernel.rs @@ -0,0 +1,106 @@ +use super::*; +use crate::solvers::BruteForce; +use crate::topology::DirectedGraph; +use crate::traits::Problem; + +#[test] +fn test_kernel_creation() { + let graph = DirectedGraph::new( + 5, + vec![(0, 1), (0, 2), (1, 3), (2, 3), (3, 4), (4, 0), (4, 1)], + ); + let problem = Kernel::new(graph); + assert_eq!(problem.num_vertices(), 5); + assert_eq!(problem.num_arcs(), 7); + assert_eq!(problem.dims(), vec![2, 2, 2, 2, 2]); +} + +#[test] +fn test_kernel_evaluate_valid() { + // 5 vertices, arcs: (0,1),(0,2),(1,3),(2,3),(3,4),(4,0),(4,1) + // Kernel: V' = {0, 3} → config [1,0,0,1,0] + // Independence: no arc between 0 and 3 in either direction + // Absorption: 1 has arc to 3 (selected), 2 has arc to 3 (selected), 4 has arc to 0 (selected) + let graph = DirectedGraph::new( + 5, + vec![(0, 1), (0, 2), (1, 3), (2, 3), (3, 4), (4, 0), (4, 1)], + ); + let problem = Kernel::new(graph); + assert_eq!(problem.evaluate(&[1, 0, 0, 1, 0]), crate::types::Or(true)); +} + +#[test] +fn test_kernel_evaluate_not_independent() { + // Select vertices 0 and 1, but there is arc (0,1), so not independent + let graph = DirectedGraph::new( + 5, + vec![(0, 1), (0, 2), (1, 3), (2, 3), (3, 4), (4, 0), (4, 1)], + ); + let problem = Kernel::new(graph); + assert_eq!(problem.evaluate(&[1, 1, 0, 0, 0]), crate::types::Or(false)); +} + +#[test] +fn test_kernel_evaluate_not_absorbing() { + // Select only vertex 0: vertex 1 has no arc to 0 (only 0->1 exists), so not absorbing + // Actually, let's check: successors of 1 = {3}, and 3 is not selected + // successors of 2 = {3}, not selected + // successors of 3 = {4}, not selected + // successors of 4 = {0, 1}, 0 is selected → ok for 4 + // But 1,2,3 cannot reach any selected vertex → not absorbing + let graph = DirectedGraph::new( + 5, + vec![(0, 1), (0, 2), (1, 3), (2, 3), (3, 4), (4, 0), (4, 1)], + ); + let problem = Kernel::new(graph); + assert_eq!(problem.evaluate(&[1, 0, 0, 0, 0]), crate::types::Or(false)); +} + +#[test] +fn test_kernel_brute_force() { + let graph = DirectedGraph::new( + 5, + vec![(0, 1), (0, 2), (1, 3), (2, 3), (3, 4), (4, 0), (4, 1)], + ); + let problem = Kernel::new(graph); + let solver = BruteForce::new(); + let solution = solver.find_witness(&problem).expect("should have a kernel"); + assert_eq!(problem.evaluate(&solution), crate::types::Or(true)); +} + +#[test] +fn test_kernel_no_solution() { + // A directed 3-cycle has no kernel: + // 0->1, 1->2, 2->0 + // Any single vertex is not absorbing (e.g., {0}: vertex 2 has successor 0, ok; vertex 1 has successor 2, not selected → fail) + // Wait, let's verify: {0}: successors of 1 = {2}, not selected. Not absorbing. + // {0,1}: arc (0,1) exists. Not independent. + // No kernel exists for odd cycles. + let graph = DirectedGraph::new(3, vec![(0, 1), (1, 2), (2, 0)]); + let problem = Kernel::new(graph); + let solver = BruteForce::new(); + assert!(solver.find_witness(&problem).is_none()); +} + +#[test] +fn test_kernel_serialization() { + let graph = DirectedGraph::new(3, vec![(0, 1), (1, 2)]); + let problem = Kernel::new(graph); + let json = serde_json::to_value(&problem).unwrap(); + let deserialized: Kernel = serde_json::from_value(json).unwrap(); + assert_eq!(deserialized.num_vertices(), 3); + assert_eq!(deserialized.num_arcs(), 2); +} + +#[test] +fn test_kernel_empty_graph() { + // A graph with no arcs: every vertex is independent; absorption requires + // every unselected vertex to have an arc to a selected one, but no arcs exist. + // So the only kernel is the full vertex set (all selected → no unselected vertices to check). + let graph = DirectedGraph::new(3, vec![]); + let problem = Kernel::new(graph); + // All selected: independent (no arcs), absorbing (no unselected vertices) + assert_eq!(problem.evaluate(&[1, 1, 1]), crate::types::Or(true)); + // Not all selected: e.g., {0} → vertex 1 has no arc to 0, not absorbing + assert_eq!(problem.evaluate(&[1, 0, 0]), crate::types::Or(false)); +} diff --git a/src/unit_tests/models/graph/maximum_achromatic_number.rs b/src/unit_tests/models/graph/maximum_achromatic_number.rs new file mode 100644 index 00000000..4b0c2a7c --- /dev/null +++ b/src/unit_tests/models/graph/maximum_achromatic_number.rs @@ -0,0 +1,96 @@ +use super::*; +use crate::solvers::BruteForce; +use crate::topology::SimpleGraph; +use crate::traits::Problem; +use crate::types::Max; + +#[test] +fn test_maximum_achromatic_number_c6() { + // C6 (6-cycle): achromatic number is 3 + let graph = SimpleGraph::new(6, vec![(0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (5, 0)]); + let problem = MaximumAchromaticNumber::new(graph); + assert_eq!(problem.num_vertices(), 6); + assert_eq!(problem.num_edges(), 6); + assert_eq!(problem.dims(), vec![6; 6]); + + // [0,1,2,0,1,2] is a valid complete proper 3-coloring + let config = vec![0, 1, 2, 0, 1, 2]; + assert_eq!(problem.evaluate(&config), Max(Some(3))); +} + +#[test] +fn test_maximum_achromatic_number_improper_coloring() { + // Adjacent vertices with the same color -> invalid + let graph = SimpleGraph::new(4, vec![(0, 1), (1, 2), (2, 3)]); + let problem = MaximumAchromaticNumber::new(graph); + + // Vertices 0 and 1 are adjacent and share color 0 + assert_eq!(problem.evaluate(&[0, 0, 1, 2]), Max(None)); +} + +#[test] +fn test_maximum_achromatic_number_incomplete_coloring() { + // Proper but not complete: color pair with no connecting edge -> invalid + let graph = SimpleGraph::new(4, vec![(0, 1), (2, 3)]); + let problem = MaximumAchromaticNumber::new(graph); + + // Colors: 0->0, 1->1, 2->2, 3->3 — proper (no adjacent same color) + // But colors 0 and 2 have no edge between them, etc. -> incomplete + assert_eq!(problem.evaluate(&[0, 1, 2, 3]), Max(None)); + + // Colors: 0->0, 1->1, 2->0, 3->1 — proper (edges 0-1 and 2-3 have different colors) + // Colors 0 and 1 have edges (0,1) and (2,3) -> complete + assert_eq!(problem.evaluate(&[0, 1, 0, 1]), Max(Some(2))); +} + +#[test] +fn test_maximum_achromatic_number_solver() { + // Small graph: path P3 (0-1-2) + // Possible colorings: + // [0,1,0] -> 2 colors, proper, complete (edge between 0 and 1 classes) -> Max(2) + // [0,1,2] -> 3 colors, proper, but colors 0 and 2 have no edge -> incomplete + let graph = SimpleGraph::new(3, vec![(0, 1), (1, 2)]); + let problem = MaximumAchromaticNumber::new(graph); + + let solver = BruteForce::new(); + let solution = solver.find_witness(&problem).unwrap(); + let value = problem.evaluate(&solution); + assert_eq!(value, Max(Some(2))); +} + +#[test] +fn test_maximum_achromatic_number_wrong_length() { + let graph = SimpleGraph::new(3, vec![(0, 1), (1, 2)]); + let problem = MaximumAchromaticNumber::new(graph); + assert_eq!(problem.evaluate(&[0, 1]), Max(None)); +} + +#[test] +fn test_maximum_achromatic_number_empty_graph() { + // No vertices, no edges + let graph = SimpleGraph::new(0, vec![]); + let problem = MaximumAchromaticNumber::new(graph); + assert_eq!(problem.evaluate(&[]), Max(Some(0))); +} + +#[test] +fn test_maximum_achromatic_number_single_vertex() { + // Single vertex, no edges: 1 color, trivially complete + let graph = SimpleGraph::new(1, vec![]); + let problem = MaximumAchromaticNumber::new(graph); + assert_eq!(problem.evaluate(&[0]), Max(Some(1))); +} + +#[test] +fn test_maximum_achromatic_number_complete_graph_k3() { + // K3: achromatic number = 3 (each vertex gets its own color) + let graph = SimpleGraph::new(3, vec![(0, 1), (1, 2), (0, 2)]); + let problem = MaximumAchromaticNumber::new(graph); + + // 3 colors: proper and complete (every color pair has an edge) + assert_eq!(problem.evaluate(&[0, 1, 2]), Max(Some(3))); + + let solver = BruteForce::new(); + let solution = solver.find_witness(&problem).unwrap(); + assert_eq!(problem.evaluate(&solution), Max(Some(3))); +} diff --git a/src/unit_tests/models/graph/minimum_covering_by_cliques.rs b/src/unit_tests/models/graph/minimum_covering_by_cliques.rs new file mode 100644 index 00000000..778a5c49 --- /dev/null +++ b/src/unit_tests/models/graph/minimum_covering_by_cliques.rs @@ -0,0 +1,132 @@ +use super::*; +use crate::solvers::BruteForce; +use crate::topology::SimpleGraph; +use crate::traits::Problem; +use crate::types::Min; + +#[test] +fn test_minimum_covering_by_cliques_creation() { + let graph = SimpleGraph::new(4, vec![(0, 1), (1, 2), (2, 3)]); + let problem = MinimumCoveringByCliques::new(graph); + assert_eq!(problem.num_vertices(), 4); + assert_eq!(problem.num_edges(), 3); + assert_eq!(problem.num_variables(), 3); + // Each edge can be assigned to one of 3 groups + assert_eq!(problem.dims(), vec![3; 3]); +} + +#[test] +fn test_minimum_covering_by_cliques_triangle() { + // Triangle: all 3 edges form a single clique -> 1 group suffices + let graph = SimpleGraph::new(3, vec![(0, 1), (1, 2), (0, 2)]); + let problem = MinimumCoveringByCliques::new(graph); + + // All edges in group 0 -> valid, 1 clique + assert_eq!(problem.evaluate(&[0, 0, 0]), Min(Some(1))); + + let solver = BruteForce::new(); + let solution = solver.find_witness(&problem).unwrap(); + let value = problem.evaluate(&solution); + assert_eq!(value, Min(Some(1))); +} + +#[test] +fn test_minimum_covering_by_cliques_path() { + // Path 0-1-2: edges (0,1) and (1,2) are each individual cliques (K2) + // but cannot be combined into one clique since 0 and 2 are not adjacent. + let graph = SimpleGraph::new(3, vec![(0, 1), (1, 2)]); + let problem = MinimumCoveringByCliques::new(graph); + + // Both edges in the same group -> invalid (0 and 2 not adjacent) + assert_eq!(problem.evaluate(&[0, 0]), Min(None)); + + // Two separate groups -> valid, 2 cliques + assert_eq!(problem.evaluate(&[0, 1]), Min(Some(2))); + + let solver = BruteForce::new(); + let solution = solver.find_witness(&problem).unwrap(); + assert_eq!(problem.evaluate(&solution), Min(Some(2))); +} + +#[test] +fn test_minimum_covering_by_cliques_invalid_group() { + // Square: 0-1-2-3-0, edges (0,1),(1,2),(2,3),(3,0) + // Putting non-adjacent-endpoint edges in same group is invalid + let graph = SimpleGraph::new(4, vec![(0, 1), (1, 2), (2, 3), (3, 0)]); + let problem = MinimumCoveringByCliques::new(graph); + + // Edges (0,1) and (2,3) in same group: vertices {0,1,2,3}, not a clique + assert_eq!(problem.evaluate(&[0, 1, 0, 1]), Min(None)); + + // Each edge in its own group -> valid, 4 cliques + assert_eq!(problem.evaluate(&[0, 1, 2, 3]), Min(Some(4))); +} + +#[test] +fn test_minimum_covering_by_cliques_empty_graph() { + // No edges: 0 cliques needed + let graph = SimpleGraph::new(3, vec![]); + let problem = MinimumCoveringByCliques::new(graph); + assert_eq!(problem.evaluate(&[]), Min(Some(0))); +} + +#[test] +fn test_minimum_covering_by_cliques_wrong_length() { + let graph = SimpleGraph::new(3, vec![(0, 1), (1, 2)]); + let problem = MinimumCoveringByCliques::new(graph); + assert_eq!(problem.evaluate(&[0]), Min(None)); +} + +#[test] +fn test_minimum_covering_by_cliques_solver() { + // K4 minus one edge: 4 vertices, 5 edges + // 0-1, 0-2, 0-3, 1-2, 2-3 (missing 1-3) + let graph = SimpleGraph::new(4, vec![(0, 1), (0, 2), (0, 3), (1, 2), (2, 3)]); + let problem = MinimumCoveringByCliques::new(graph); + + let solver = BruteForce::new(); + let solution = solver.find_witness(&problem).unwrap(); + let value = problem.evaluate(&solution); + // Two triangles: {0,1,2} and {0,2,3} cover all 5 edges + assert_eq!(value, Min(Some(2))); +} + +#[test] +fn test_minimum_covering_by_cliques_is_valid_cover() { + let graph = SimpleGraph::new(3, vec![(0, 1), (1, 2), (0, 2)]); + let problem = MinimumCoveringByCliques::new(graph); + + // All in one group (triangle) -> valid + assert!(problem.is_valid_cover(&[0, 0, 0])); + + // Path 0-1 and 1-2 in same group, 0-2 separate: + // Group 0 has vertices {0,1,2} from edges (0,1) and (1,2). + // But 0 and 2 are adjacent in this graph, so {0,1,2} is a clique -> valid + assert!(problem.is_valid_cover(&[0, 0, 1])); +} + +#[test] +fn test_minimum_covering_by_cliques_paper_example() { + // 6 vertices, 9 edges from the canonical example + let graph = SimpleGraph::new( + 6, + vec![ + (0, 1), + (1, 2), + (2, 3), + (3, 0), + (0, 2), + (4, 0), + (4, 1), + (5, 2), + (5, 3), + ], + ); + let problem = MinimumCoveringByCliques::new(graph); + + // The given optimal config + let config = vec![0, 0, 1, 1, 0, 2, 2, 3, 3]; + let result = problem.evaluate(&config); + assert!(result.is_valid()); + assert_eq!(result.unwrap(), 4); +} diff --git a/src/unit_tests/models/graph/minimum_geometric_connected_dominating_set.rs b/src/unit_tests/models/graph/minimum_geometric_connected_dominating_set.rs new file mode 100644 index 00000000..91bccdce --- /dev/null +++ b/src/unit_tests/models/graph/minimum_geometric_connected_dominating_set.rs @@ -0,0 +1,128 @@ +use super::*; +use crate::solvers::BruteForce; +use crate::traits::Problem; + +#[test] +fn test_creation_and_getters() { + let points = vec![(0.0, 0.0), (1.0, 0.0), (2.0, 0.0)]; + let problem = MinimumGeometricConnectedDominatingSet::new(points, 1.5); + assert_eq!(problem.num_points(), 3); + assert!((problem.radius() - 1.5).abs() < f64::EPSILON); + assert_eq!(problem.points().len(), 3); + assert_eq!(problem.num_variables(), 3); + assert_eq!(problem.dims(), vec![2; 3]); +} + +#[test] +#[should_panic(expected = "radius must be positive")] +fn test_negative_radius_panics() { + MinimumGeometricConnectedDominatingSet::new(vec![(0.0, 0.0)], -1.0); +} + +#[test] +#[should_panic(expected = "points must be non-empty")] +fn test_empty_points_panics() { + MinimumGeometricConnectedDominatingSet::new(vec![], 1.0); +} + +#[test] +fn test_try_new_errors() { + assert!(MinimumGeometricConnectedDominatingSet::try_new(vec![], 1.0).is_err()); + assert!(MinimumGeometricConnectedDominatingSet::try_new(vec![(0.0, 0.0)], 0.0).is_err()); + assert!(MinimumGeometricConnectedDominatingSet::try_new(vec![(0.0, 0.0)], -1.0).is_err()); + assert!(MinimumGeometricConnectedDominatingSet::try_new(vec![(0.0, 0.0)], 1.0).is_ok()); +} + +#[test] +fn test_single_point() { + let problem = MinimumGeometricConnectedDominatingSet::new(vec![(0.0, 0.0)], 1.0); + // Selecting the single point is valid + let result = problem.evaluate(&[1]); + assert!(result.is_valid()); + assert_eq!(result.unwrap(), 1); + // Not selecting is invalid (empty set) + let result = problem.evaluate(&[0]); + assert!(!result.is_valid()); +} + +#[test] +fn test_evaluate_domination_failure() { + // Two points far apart, radius too small + let problem = MinimumGeometricConnectedDominatingSet::new(vec![(0.0, 0.0), (10.0, 0.0)], 1.0); + // Only select first point: second point not dominated + let result = problem.evaluate(&[1, 0]); + assert!(!result.is_valid()); +} + +#[test] +fn test_evaluate_connectivity_failure() { + // Three points in a line, select endpoints but not middle + // With radius=1.5, each point covers its neighbor but endpoints aren't connected + let problem = + MinimumGeometricConnectedDominatingSet::new(vec![(0.0, 0.0), (1.0, 0.0), (2.0, 0.0)], 1.5); + // Select points 0 and 2 (not connected to each other, distance = 2.0 > 1.5) + let result = problem.evaluate(&[1, 0, 1]); + assert!(!result.is_valid()); +} + +#[test] +fn test_evaluate_valid_connected_dominating_set() { + // Three collinear points, select first two + let problem = + MinimumGeometricConnectedDominatingSet::new(vec![(0.0, 0.0), (1.0, 0.0), (2.0, 0.0)], 1.5); + // Select 0 and 1: they are connected (dist=1.0 <= 1.5), and point 2 is dominated by point 1 (dist=1.0 <= 1.5) + let result = problem.evaluate(&[1, 1, 0]); + assert!(result.is_valid()); + assert_eq!(result.unwrap(), 2); +} + +#[test] +fn test_brute_force_line_graph() { + // Line of 3 points, middle point dominates all + let problem = + MinimumGeometricConnectedDominatingSet::new(vec![(0.0, 0.0), (1.0, 0.0), (2.0, 0.0)], 1.5); + let solver = BruteForce::new(); + let witness = solver.find_witness(&problem).unwrap(); + let value = problem.evaluate(&witness).unwrap(); + // Middle point alone dominates all and is trivially connected + assert_eq!(value, 1); +} + +#[test] +fn test_ladder_example() { + // 8 points in a ladder: [(0,0),(3,0),(6,0),(9,0),(0,3),(3,3),(6,3),(9,3)], B=3.5 + let problem = MinimumGeometricConnectedDominatingSet::new( + vec![ + (0.0, 0.0), + (3.0, 0.0), + (6.0, 0.0), + (9.0, 0.0), + (0.0, 3.0), + (3.0, 3.0), + (6.0, 3.0), + (9.0, 3.0), + ], + 3.5, + ); + // Bottom row selected: config [1,1,1,1,0,0,0,0] + let config = vec![1, 1, 1, 1, 0, 0, 0, 0]; + let result = problem.evaluate(&config); + assert!(result.is_valid()); + assert_eq!(result.unwrap(), 4); + + // Verify with brute force + let solver = BruteForce::new(); + let witness = solver.find_witness(&problem).unwrap(); + let best_value = problem.evaluate(&witness).unwrap(); + assert_eq!(best_value, 4); +} + +#[test] +fn test_serialization_roundtrip() { + let problem = + MinimumGeometricConnectedDominatingSet::new(vec![(0.0, 0.0), (1.0, 1.0), (2.0, 0.0)], 2.0); + let json = serde_json::to_string(&problem).unwrap(); + let deserialized: MinimumGeometricConnectedDominatingSet = serde_json::from_str(&json).unwrap(); + assert_eq!(deserialized.num_points(), 3); + assert!((deserialized.radius() - 2.0).abs() < f64::EPSILON); +} diff --git a/src/unit_tests/models/graph/minimum_intersection_graph_basis.rs b/src/unit_tests/models/graph/minimum_intersection_graph_basis.rs new file mode 100644 index 00000000..3f83ae34 --- /dev/null +++ b/src/unit_tests/models/graph/minimum_intersection_graph_basis.rs @@ -0,0 +1,110 @@ +use super::*; +use crate::solvers::BruteForce; +use crate::topology::SimpleGraph; +use crate::traits::Problem; +use crate::types::Min; + +#[test] +fn test_minimum_intersection_graph_basis_creation() { + let graph = SimpleGraph::new(3, vec![(0, 1), (1, 2)]); + let problem = MinimumIntersectionGraphBasis::new(graph); + assert_eq!(problem.num_vertices(), 3); + assert_eq!(problem.num_edges(), 2); + // 3 vertices * 2 edges = 6 binary variables + assert_eq!(problem.num_variables(), 6); + assert_eq!(problem.dims(), vec![2; 6]); +} + +#[test] +fn test_minimum_intersection_graph_basis_p3() { + // Path P3: 0-1-2, edges (0,1) and (1,2) + // Intersection number = 2: S[0]={0}, S[1]={0,1}, S[2]={1} + let graph = SimpleGraph::new(3, vec![(0, 1), (1, 2)]); + let problem = MinimumIntersectionGraphBasis::new(graph); + + // Valid config: S[0]={0}, S[1]={0,1}, S[2]={1} -> [1,0, 1,1, 0,1] + let config = vec![1, 0, 1, 1, 0, 1]; + assert_eq!(problem.evaluate(&config), Min(Some(2))); + + // Brute force should find optimal = 2 + let solver = BruteForce::new(); + let solution = solver.find_witness(&problem).unwrap(); + assert_eq!(problem.evaluate(&solution), Min(Some(2))); +} + +#[test] +fn test_minimum_intersection_graph_basis_single_edge() { + // Single edge: 0-1 + // Intersection number = 1: S[0]={0}, S[1]={0} + let graph = SimpleGraph::new(2, vec![(0, 1)]); + let problem = MinimumIntersectionGraphBasis::new(graph); + + // Valid: S[0]={0}, S[1]={0} -> [1, 1] + assert_eq!(problem.evaluate(&[1, 1]), Min(Some(1))); + + // Invalid: S[0]={}, S[1]={0} -> edge not covered + assert_eq!(problem.evaluate(&[0, 1]), Min(None)); + + let solver = BruteForce::new(); + let solution = solver.find_witness(&problem).unwrap(); + assert_eq!(problem.evaluate(&solution), Min(Some(1))); +} + +#[test] +fn test_minimum_intersection_graph_basis_triangle() { + // Triangle K3: edges (0,1),(1,2),(0,2) + // Intersection number = 1 for K3: all vertices share one element. + // S[0]={0}, S[1]={0}, S[2]={0} — all pairs intersect, which matches K3. + let graph = SimpleGraph::new(3, vec![(0, 1), (1, 2), (0, 2)]); + let problem = MinimumIntersectionGraphBasis::new(graph); + + // 3 vertices * 3 edges = 9 binary variables + assert_eq!(problem.dims(), vec![2; 9]); + + // Valid: S[0]={0}, S[1]={0}, S[2]={0} + // config: v0: [1,0,0], v1: [1,0,0], v2: [1,0,0] + let config = vec![1, 0, 0, 1, 0, 0, 1, 0, 0]; + assert_eq!(problem.evaluate(&config), Min(Some(1))); + + let solver = BruteForce::new(); + let solution = solver.find_witness(&problem).unwrap(); + assert_eq!(problem.evaluate(&solution), Min(Some(1))); +} + +#[test] +fn test_minimum_intersection_graph_basis_empty_graph() { + // No edges: universe size 0 + let graph = SimpleGraph::new(3, vec![]); + let problem = MinimumIntersectionGraphBasis::new(graph); + assert_eq!(problem.evaluate(&[]), Min(Some(0))); +} + +#[test] +fn test_minimum_intersection_graph_basis_wrong_length() { + let graph = SimpleGraph::new(3, vec![(0, 1), (1, 2)]); + let problem = MinimumIntersectionGraphBasis::new(graph); + assert_eq!(problem.evaluate(&[1, 0, 1]), Min(None)); +} + +#[test] +fn test_minimum_intersection_graph_basis_invalid_nonadjacent_intersect() { + // P3: edges (0,1),(1,2). Vertices 0 and 2 are NOT adjacent. + // If S[0] and S[2] intersect, it's invalid. + let graph = SimpleGraph::new(3, vec![(0, 1), (1, 2)]); + let problem = MinimumIntersectionGraphBasis::new(graph); + + // S[0]={0,1}, S[1]={0,1}, S[2]={0,1} -> 0 and 2 share elements -> invalid + let config = vec![1, 1, 1, 1, 1, 1]; + assert_eq!(problem.evaluate(&config), Min(None)); +} + +#[test] +fn test_minimum_intersection_graph_basis_invalid_edge_not_covered() { + // P3: edges (0,1),(1,2). + // S[0]={0}, S[1]={1}, S[2]={1} -> edge (0,1) not covered (no intersection) + let graph = SimpleGraph::new(3, vec![(0, 1), (1, 2)]); + let problem = MinimumIntersectionGraphBasis::new(graph); + + let config = vec![1, 0, 0, 1, 0, 1]; + assert_eq!(problem.evaluate(&config), Min(None)); +} diff --git a/src/unit_tests/models/graph/monochromatic_triangle.rs b/src/unit_tests/models/graph/monochromatic_triangle.rs new file mode 100644 index 00000000..26cda983 --- /dev/null +++ b/src/unit_tests/models/graph/monochromatic_triangle.rs @@ -0,0 +1,112 @@ +use super::*; +use crate::solvers::BruteForce; +use crate::topology::SimpleGraph; +use crate::traits::Problem; + +fn k4_instance() -> MonochromaticTriangle { + // K4: complete graph on 4 vertices, 6 edges + MonochromaticTriangle::new(SimpleGraph::new( + 4, + vec![(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)], + )) +} + +#[test] +fn test_monochromatic_triangle_creation() { + let problem = k4_instance(); + assert_eq!(problem.num_vertices(), 4); + assert_eq!(problem.num_edges(), 6); + // K4 has 4 triangles + assert_eq!(problem.triangles().len(), 4); + // One binary variable per edge + assert_eq!(problem.dims(), vec![2; 6]); + assert_eq!(problem.graph().num_vertices(), 4); +} + +#[test] +fn test_monochromatic_triangle_evaluate_valid() { + let problem = k4_instance(); + // Edges: (0,1),(0,2),(0,3),(1,2),(1,3),(2,3) + // Config [0,0,1,1,0,1]: + // Triangle (0,1,2): edges 0,1,3 -> 0,0,1 -> mixed + // Triangle (0,1,3): edges 0,2,4 -> 0,1,0 -> mixed + // Triangle (0,2,3): edges 1,2,5 -> 0,1,1 -> mixed + // Triangle (1,2,3): edges 3,4,5 -> 1,0,1 -> mixed + assert!(problem.evaluate(&[0, 0, 1, 1, 0, 1])); +} + +#[test] +fn test_monochromatic_triangle_evaluate_invalid() { + let problem = k4_instance(); + // All edges color 0: every triangle is monochromatic + assert!(!problem.evaluate(&[0, 0, 0, 0, 0, 0])); + // All edges color 1: every triangle is monochromatic + assert!(!problem.evaluate(&[1, 1, 1, 1, 1, 1])); +} + +#[test] +fn test_monochromatic_triangle_evaluate_wrong_length() { + let problem = k4_instance(); + assert!(!problem.evaluate(&[0, 1, 0])); + assert!(!problem.evaluate(&[0, 1, 0, 0, 1, 1, 0])); +} + +#[test] +fn test_monochromatic_triangle_triangle_free_graph() { + // A path graph 0-1-2 has no triangles, so any coloring is valid. + let problem = MonochromaticTriangle::new(SimpleGraph::new(3, vec![(0, 1), (1, 2)])); + assert_eq!(problem.triangles().len(), 0); + assert!(problem.evaluate(&[0, 0])); + assert!(problem.evaluate(&[1, 1])); + assert!(problem.evaluate(&[0, 1])); +} + +#[test] +fn test_monochromatic_triangle_brute_force_k4() { + let problem = k4_instance(); + let solver = BruteForce::new(); + let solution = solver.find_witness(&problem); + assert!(solution.is_some()); + assert!(problem.evaluate(&solution.unwrap())); +} + +#[test] +fn test_monochromatic_triangle_brute_force_k6_no_solution() { + // By Ramsey theory R(3,3)=6, K6 has no 2-coloring avoiding monochromatic triangles. + let mut edges = Vec::new(); + for u in 0..6 { + for v in (u + 1)..6 { + edges.push((u, v)); + } + } + let problem = MonochromaticTriangle::new(SimpleGraph::new(6, edges)); + assert_eq!(problem.num_edges(), 15); + let solver = BruteForce::new(); + assert!(solver.find_witness(&problem).is_none()); +} + +#[test] +fn test_monochromatic_triangle_brute_force_k5_has_solution() { + // K5 has valid 2-colorings (R(3,3)=6, so K5 can be 2-colored). + let mut edges = Vec::new(); + for u in 0..5 { + for v in (u + 1)..5 { + edges.push((u, v)); + } + } + let problem = MonochromaticTriangle::new(SimpleGraph::new(5, edges)); + let solver = BruteForce::new(); + let solution = solver.find_witness(&problem); + assert!(solution.is_some()); + assert!(problem.evaluate(&solution.unwrap())); +} + +#[test] +fn test_monochromatic_triangle_serialization() { + let problem = k4_instance(); + let json = serde_json::to_string(&problem).unwrap(); + let deserialized: MonochromaticTriangle = serde_json::from_str(&json).unwrap(); + assert_eq!(deserialized.num_vertices(), 4); + assert_eq!(deserialized.num_edges(), 6); + assert_eq!(deserialized.triangles().len(), 4); +} diff --git a/src/unit_tests/models/graph/partition_into_cliques.rs b/src/unit_tests/models/graph/partition_into_cliques.rs new file mode 100644 index 00000000..0f5fff26 --- /dev/null +++ b/src/unit_tests/models/graph/partition_into_cliques.rs @@ -0,0 +1,122 @@ +use super::*; +use crate::solvers::BruteForce; +use crate::topology::SimpleGraph; +use crate::traits::Problem; + +fn two_triangle_instance() -> PartitionIntoCliques { + // Two triangles: 0-1-2 and 3-4-5, plus cross edges 0-3, 1-4, 2-5 + PartitionIntoCliques::new( + SimpleGraph::new( + 6, + vec![ + (0, 1), + (0, 2), + (1, 2), + (3, 4), + (3, 5), + (4, 5), + (0, 3), + (1, 4), + (2, 5), + ], + ), + 3, + ) +} + +#[test] +fn test_partition_into_cliques_creation() { + let problem = two_triangle_instance(); + assert_eq!(problem.num_vertices(), 6); + assert_eq!(problem.num_edges(), 9); + assert_eq!(problem.num_cliques(), 3); + assert_eq!(problem.dims(), vec![3; 6]); + assert_eq!(problem.graph().num_vertices(), 6); +} + +#[test] +fn test_partition_into_cliques_evaluate_positive() { + let problem = two_triangle_instance(); + + // Group 0 = {0,1,2} (triangle), Group 1 = {3,4,5} (triangle) + assert!(problem.evaluate(&[0, 0, 0, 1, 1, 1])); + + // Each vertex in its own group (trivially valid) + assert!(problem.evaluate(&[0, 1, 2, 0, 1, 2])); +} + +#[test] +fn test_partition_into_cliques_evaluate_negative() { + let problem = two_triangle_instance(); + + // Group 0 = {0,1,2,3}: 0-1, 0-2, 1-2, 0-3 present, but 1-3 missing + assert!(!problem.evaluate(&[0, 0, 0, 0, 1, 2])); +} + +#[test] +fn test_partition_into_cliques_evaluate_wrong_config_length() { + let problem = two_triangle_instance(); + assert!(!problem.evaluate(&[0, 1, 0])); + assert!(!problem.evaluate(&[0, 1, 0, 0, 1, 1, 0])); +} + +#[test] +fn test_partition_into_cliques_evaluate_out_of_range_group() { + let problem = two_triangle_instance(); + // Group 3 doesn't exist (num_cliques=3, valid groups are 0,1,2) + assert!(!problem.evaluate(&[0, 1, 3, 0, 1, 2])); +} + +#[test] +fn test_partition_into_cliques_brute_force_finds_solution() { + // Complete graph K4, K=2: can partition into two cliques + let problem = PartitionIntoCliques::new( + SimpleGraph::new(4, vec![(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)]), + 2, + ); + let solver = BruteForce::new(); + let solution = solver.find_witness(&problem); + assert!(solution.is_some()); + assert!(problem.evaluate(&solution.unwrap())); +} + +#[test] +fn test_partition_into_cliques_brute_force_no_solution() { + // Path 0-1-2, K=1: {0,1,2} not a clique (missing edge 0-2) + let problem = PartitionIntoCliques::new(SimpleGraph::new(3, vec![(0, 1), (1, 2)]), 1); + let solver = BruteForce::new(); + assert!(solver.find_witness(&problem).is_none()); +} + +#[test] +fn test_partition_into_cliques_brute_force_all_valid() { + // Complete graph K3, K=3: every assignment is valid + let problem = PartitionIntoCliques::new(SimpleGraph::new(3, vec![(0, 1), (0, 2), (1, 2)]), 3); + let solutions = BruteForce::new().find_all_witnesses(&problem); + assert!(!solutions.is_empty()); + for sol in &solutions { + assert!(problem.evaluate(sol)); + } +} + +#[test] +fn test_partition_into_cliques_serialization() { + let problem = two_triangle_instance(); + let json = serde_json::to_string(&problem).unwrap(); + let deserialized: PartitionIntoCliques = serde_json::from_str(&json).unwrap(); + assert_eq!(deserialized.num_vertices(), 6); + assert_eq!(deserialized.num_edges(), 9); + assert_eq!(deserialized.num_cliques(), 3); +} + +#[test] +#[should_panic(expected = "num_cliques must be at least 1")] +fn test_partition_into_cliques_rejects_zero() { + let _ = PartitionIntoCliques::new(SimpleGraph::new(2, vec![(0, 1)]), 0); +} + +#[test] +#[should_panic(expected = "num_cliques must be at most num_vertices")] +fn test_partition_into_cliques_rejects_too_many() { + let _ = PartitionIntoCliques::new(SimpleGraph::new(2, vec![(0, 1)]), 3); +} diff --git a/src/unit_tests/models/graph/partition_into_perfect_matchings.rs b/src/unit_tests/models/graph/partition_into_perfect_matchings.rs new file mode 100644 index 00000000..d9a5a32e --- /dev/null +++ b/src/unit_tests/models/graph/partition_into_perfect_matchings.rs @@ -0,0 +1,115 @@ +use super::*; +use crate::solvers::BruteForce; +use crate::topology::SimpleGraph; +use crate::traits::Problem; + +fn four_vertex_instance() -> PartitionIntoPerfectMatchings { + // 4 vertices with edges: (0,1),(2,3),(0,2),(1,3) + PartitionIntoPerfectMatchings::new(SimpleGraph::new(4, vec![(0, 1), (2, 3), (0, 2), (1, 3)]), 2) +} + +#[test] +fn test_partition_into_perfect_matchings_creation() { + let problem = four_vertex_instance(); + assert_eq!(problem.num_vertices(), 4); + assert_eq!(problem.num_edges(), 4); + assert_eq!(problem.num_matchings(), 2); + assert_eq!(problem.dims(), vec![2; 4]); + assert_eq!(problem.graph().num_vertices(), 4); +} + +#[test] +fn test_partition_into_perfect_matchings_evaluate_positive() { + let problem = four_vertex_instance(); + + // Group 0 = {0,1} (edge 0-1), Group 1 = {2,3} (edge 2-3) + assert!(problem.evaluate(&[0, 0, 1, 1])); + + // Group 0 = {0,2} (edge 0-2), Group 1 = {1,3} (edge 1-3) + assert!(problem.evaluate(&[0, 1, 0, 1])); +} + +#[test] +fn test_partition_into_perfect_matchings_evaluate_negative() { + let problem = four_vertex_instance(); + + // Group 0 = {0,1,2}: vertex 0 has neighbors 1 and 2 both in group => degree 2, not 1 + assert!(!problem.evaluate(&[0, 0, 0, 1])); + + // All in one group: each vertex has 2 neighbors in the group + assert!(!problem.evaluate(&[0, 0, 0, 0])); +} + +#[test] +fn test_partition_into_perfect_matchings_evaluate_odd_group() { + // A group with an odd number of members can never be a perfect matching + let problem = four_vertex_instance(); + // Group 0 = {0,1,2} (3 vertices), Group 1 = {3} (1 vertex) + assert!(!problem.evaluate(&[0, 0, 0, 1])); +} + +#[test] +fn test_partition_into_perfect_matchings_evaluate_wrong_config_length() { + let problem = four_vertex_instance(); + assert!(!problem.evaluate(&[0, 1])); + assert!(!problem.evaluate(&[0, 1, 0, 0, 1])); +} + +#[test] +fn test_partition_into_perfect_matchings_evaluate_out_of_range_group() { + let problem = four_vertex_instance(); + // Group 2 doesn't exist (num_matchings=2, valid groups are 0,1) + assert!(!problem.evaluate(&[0, 1, 2, 0])); +} + +#[test] +fn test_partition_into_perfect_matchings_brute_force_finds_solution() { + let problem = four_vertex_instance(); + let solver = BruteForce::new(); + let solution = solver.find_witness(&problem); + assert!(solution.is_some()); + assert!(problem.evaluate(&solution.unwrap())); +} + +#[test] +fn test_partition_into_perfect_matchings_brute_force_no_solution() { + // Path 0-1-2: no perfect matching partition possible with K=1 + // Group {0,1,2} has 3 vertices (odd) so cannot be a perfect matching + let problem = PartitionIntoPerfectMatchings::new(SimpleGraph::new(3, vec![(0, 1), (1, 2)]), 1); + let solver = BruteForce::new(); + assert!(solver.find_witness(&problem).is_none()); +} + +#[test] +fn test_partition_into_perfect_matchings_brute_force_all_valid() { + // 2 vertices with edge (0,1), K=2: group {0,1} is a perfect matching + let problem = PartitionIntoPerfectMatchings::new(SimpleGraph::new(2, vec![(0, 1)]), 2); + let solutions = BruteForce::new().find_all_witnesses(&problem); + assert!(!solutions.is_empty()); + for sol in &solutions { + assert!(problem.evaluate(sol)); + } +} + +#[test] +fn test_partition_into_perfect_matchings_serialization() { + let problem = four_vertex_instance(); + let json = serde_json::to_string(&problem).unwrap(); + let deserialized: PartitionIntoPerfectMatchings = + serde_json::from_str(&json).unwrap(); + assert_eq!(deserialized.num_vertices(), 4); + assert_eq!(deserialized.num_edges(), 4); + assert_eq!(deserialized.num_matchings(), 2); +} + +#[test] +#[should_panic(expected = "num_matchings must be at least 1")] +fn test_partition_into_perfect_matchings_rejects_zero() { + let _ = PartitionIntoPerfectMatchings::new(SimpleGraph::new(2, vec![(0, 1)]), 0); +} + +#[test] +#[should_panic(expected = "num_matchings must be at most num_vertices")] +fn test_partition_into_perfect_matchings_rejects_too_many() { + let _ = PartitionIntoPerfectMatchings::new(SimpleGraph::new(2, vec![(0, 1)]), 3); +} diff --git a/src/unit_tests/models/misc/betweenness.rs b/src/unit_tests/models/misc/betweenness.rs new file mode 100644 index 00000000..1a2d1b4f --- /dev/null +++ b/src/unit_tests/models/misc/betweenness.rs @@ -0,0 +1,134 @@ +use crate::models::misc::Betweenness; +use crate::solvers::BruteForce; +use crate::traits::Problem; +use crate::types::Or; + +fn example_problem() -> Betweenness { + Betweenness::new(5, vec![(0, 1, 2), (2, 3, 4), (0, 2, 4), (1, 3, 4)]) +} + +#[test] +fn test_betweenness_basic() { + let problem = example_problem(); + assert_eq!(problem.num_elements(), 5); + assert_eq!(problem.num_triples(), 4); + assert_eq!( + problem.triples(), + &[(0, 1, 2), (2, 3, 4), (0, 2, 4), (1, 3, 4)] + ); + assert_eq!(problem.dims(), vec![5; 5]); + assert_eq!(problem.num_variables(), 5); + assert_eq!(::NAME, "Betweenness"); + assert_eq!(::variant(), vec![]); +} + +#[test] +fn test_betweenness_evaluate_identity_permutation() { + let problem = example_problem(); + // Identity permutation: element i is at position i + assert_eq!(problem.evaluate(&[0, 1, 2, 3, 4]), Or(true)); +} + +#[test] +fn test_betweenness_evaluate_reverse_permutation() { + let problem = example_problem(); + // Reverse permutation: element i is at position 4-i + assert_eq!(problem.evaluate(&[4, 3, 2, 1, 0]), Or(true)); +} + +#[test] +fn test_betweenness_evaluate_invalid_permutation() { + let problem = example_problem(); + // Not a permutation (duplicate positions) + assert_eq!(problem.evaluate(&[0, 0, 1, 2, 3]), Or(false)); + // Position out of range + assert_eq!(problem.evaluate(&[0, 1, 2, 3, 5]), Or(false)); + // Wrong length + assert_eq!(problem.evaluate(&[0, 1, 2]), Or(false)); +} + +#[test] +fn test_betweenness_evaluate_unsatisfying_permutation() { + let problem = example_problem(); + // Permutation [1, 0, 2, 3, 4]: triple (0,1,2) => f(0)=1, f(1)=0, f(2)=2 + // Need f(0)(invalid).is_err()); + } +} + +#[test] +#[should_panic(expected = "at least one element")] +fn test_betweenness_zero_elements_panics() { + Betweenness::new(0, vec![]); +} + +#[test] +#[should_panic(expected = "out of range")] +fn test_betweenness_element_out_of_range_panics() { + Betweenness::new(3, vec![(0, 1, 5)]); +} + +#[test] +#[should_panic(expected = "duplicate elements")] +fn test_betweenness_duplicate_in_triple_panics() { + Betweenness::new(3, vec![(0, 0, 1)]); +} diff --git a/src/unit_tests/models/misc/cyclic_ordering.rs b/src/unit_tests/models/misc/cyclic_ordering.rs new file mode 100644 index 00000000..12347496 --- /dev/null +++ b/src/unit_tests/models/misc/cyclic_ordering.rs @@ -0,0 +1,131 @@ +use crate::models::misc::CyclicOrdering; +use crate::solvers::BruteForce; +use crate::traits::Problem; +use crate::types::Or; + +fn example_problem() -> CyclicOrdering { + CyclicOrdering::new(5, vec![(0, 1, 2), (2, 3, 0), (1, 3, 4)]) +} + +#[test] +fn test_cyclic_ordering_basic() { + let problem = example_problem(); + assert_eq!(problem.num_elements(), 5); + assert_eq!(problem.num_triples(), 3); + assert_eq!(problem.triples(), &[(0, 1, 2), (2, 3, 0), (1, 3, 4)]); + assert_eq!(problem.dims(), vec![5; 5]); + assert_eq!(problem.num_variables(), 5); + assert_eq!(::NAME, "CyclicOrdering"); + assert_eq!(::variant(), vec![]); +} + +#[test] +fn test_cyclic_ordering_evaluate_satisfying() { + let problem = example_problem(); + // config = [1,3,4,0,2]: f(0)=1, f(1)=3, f(2)=4, f(3)=0, f(4)=2 + // (0,1,2): 1<3<4 ✓ (2,3,0): 0<1<4 (cyclic) ✓ (1,3,4): 0<2<3 (cyclic) ✓ + assert_eq!(problem.evaluate(&[1, 3, 4, 0, 2]), Or(true)); +} + +#[test] +fn test_cyclic_ordering_evaluate_unsatisfying() { + let problem = example_problem(); + // Identity permutation [0,1,2,3,4]: + // (0,1,2): 0<1<2 ✓ (2,3,0): f(2)=2, f(3)=3, f(0)=0 → need + // (2<3<0) or (3<0<2) or (0<2<3). 0<2<3 ✓ + // (1,3,4): f(1)=1, f(3)=3, f(4)=4 → 1<3<4 ✓ + // Actually identity works! Let me pick one that doesn't. + // [0,2,1,3,4]: + // (0,1,2): f(0)=0, f(1)=2, f(2)=1 → (0<2<1)? no. (2<1<0)? no. (1<0<2)? no. → fails + assert_eq!(problem.evaluate(&[0, 2, 1, 3, 4]), Or(false)); +} + +#[test] +fn test_cyclic_ordering_evaluate_invalid_permutation() { + let problem = example_problem(); + // Not a permutation (duplicate positions) + assert_eq!(problem.evaluate(&[0, 0, 1, 2, 3]), Or(false)); + // Position out of range + assert_eq!(problem.evaluate(&[0, 1, 2, 3, 5]), Or(false)); + // Wrong length + assert_eq!(problem.evaluate(&[0, 1, 2]), Or(false)); +} + +#[test] +fn test_cyclic_ordering_solver_finds_witness() { + let problem = example_problem(); + let solver = BruteForce::new(); + let solution = solver.find_witness(&problem).unwrap(); + assert_eq!(problem.evaluate(&solution), Or(true)); +} + +#[test] +fn test_cyclic_ordering_unsatisfiable_instance() { + // With 3 elements, triples (0,1,2) and (0,2,1): + // (0,1,2) requires cyclic order a(invalid).is_err()); + } +} + +#[test] +#[should_panic(expected = "at least one element")] +fn test_cyclic_ordering_zero_elements_panics() { + CyclicOrdering::new(0, vec![]); +} + +#[test] +#[should_panic(expected = "out of range")] +fn test_cyclic_ordering_element_out_of_range_panics() { + CyclicOrdering::new(3, vec![(0, 1, 5)]); +} + +#[test] +#[should_panic(expected = "duplicate elements")] +fn test_cyclic_ordering_duplicate_in_triple_panics() { + CyclicOrdering::new(3, vec![(0, 0, 1)]); +} diff --git a/src/unit_tests/models/misc/dynamic_storage_allocation.rs b/src/unit_tests/models/misc/dynamic_storage_allocation.rs new file mode 100644 index 00000000..92ea4d67 --- /dev/null +++ b/src/unit_tests/models/misc/dynamic_storage_allocation.rs @@ -0,0 +1,152 @@ +use crate::models::misc::DynamicStorageAllocation; +use crate::solvers::BruteForce; +use crate::traits::Problem; +use crate::types::Or; + +fn example_problem() -> DynamicStorageAllocation { + // 5 items, memory_size = 6 + DynamicStorageAllocation::new( + vec![(0, 3, 2), (0, 2, 3), (1, 4, 1), (2, 5, 3), (3, 5, 2)], + 6, + ) +} + +#[test] +fn test_dynamic_storage_allocation_basic() { + let problem = example_problem(); + assert_eq!(problem.num_items(), 5); + assert_eq!(problem.memory_size(), 6); + assert_eq!(problem.items().len(), 5); + // dims: D - s(a) + 1 for each item + // sizes are 2, 3, 1, 3, 2 => dims are 5, 4, 6, 4, 5 + assert_eq!(problem.dims(), vec![5, 4, 6, 4, 5]); + assert_eq!(problem.num_variables(), 5); + assert_eq!( + ::NAME, + "DynamicStorageAllocation" + ); + assert_eq!(::variant(), vec![]); +} + +#[test] +fn test_dynamic_storage_allocation_evaluate_feasible() { + let problem = example_problem(); + // Solution from the issue: σ = [0, 2, 5, 2, 0] (0-indexed) + assert_eq!(problem.evaluate(&[0, 2, 5, 2, 0]), Or(true)); +} + +#[test] +fn test_dynamic_storage_allocation_evaluate_infeasible() { + let problem = example_problem(); + // All items at address 0 - should overlap + assert_eq!(problem.evaluate(&[0, 0, 0, 0, 0]), Or(false)); +} + +#[test] +fn test_dynamic_storage_allocation_rejects_invalid_config_length() { + let problem = example_problem(); + assert_eq!(problem.evaluate(&[0, 2, 5]), Or(false)); + assert_eq!(problem.evaluate(&[0, 2, 5, 2, 0, 1]), Or(false)); +} + +#[test] +fn test_dynamic_storage_allocation_rejects_out_of_bounds() { + let problem = example_problem(); + // Item 0 has size 2, so max start is 4 (0..=4). Start at 5 => 5+2=7 > 6 + assert_eq!(problem.evaluate(&[5, 0, 0, 0, 0]), Or(false)); +} + +#[test] +fn test_dynamic_storage_allocation_solver_finds_witness() { + // Use a small instance for brute-force + let problem = DynamicStorageAllocation::new(vec![(0, 2, 1), (1, 3, 1)], 2); + let solver = BruteForce::new(); + let witness = solver.find_witness(&problem).unwrap(); + assert_eq!(problem.evaluate(&witness), Or(true)); +} + +#[test] +fn test_dynamic_storage_allocation_unsatisfiable_instance() { + // Two items overlap in time, both size 3, memory = 4: can't fit without overlap + let problem = DynamicStorageAllocation::new(vec![(0, 2, 3), (0, 2, 3)], 4); + let solver = BruteForce::new(); + assert!(solver.find_witness(&problem).is_none()); +} + +#[test] +fn test_dynamic_storage_allocation_serialization_round_trip() { + let problem = example_problem(); + let json = serde_json::to_value(&problem).unwrap(); + assert_eq!( + json, + serde_json::json!({ + "items": [[0, 3, 2], [0, 2, 3], [1, 4, 1], [2, 5, 3], [3, 5, 2]], + "memory_size": 6, + }) + ); + + let restored: DynamicStorageAllocation = serde_json::from_value(json).unwrap(); + assert_eq!(restored.items(), problem.items()); + assert_eq!(restored.memory_size(), problem.memory_size()); +} + +#[test] +fn test_dynamic_storage_allocation_deserialization_rejects_invalid() { + let invalid_cases = [ + // Empty items + serde_json::json!({ + "items": [], + "memory_size": 6, + }), + // Zero memory_size + serde_json::json!({ + "items": [[0, 2, 1]], + "memory_size": 0, + }), + // Zero size item + serde_json::json!({ + "items": [[0, 2, 0]], + "memory_size": 6, + }), + // departure <= arrival + serde_json::json!({ + "items": [[3, 2, 1]], + "memory_size": 6, + }), + // size > memory_size + serde_json::json!({ + "items": [[0, 2, 7]], + "memory_size": 6, + }), + ]; + + for invalid in invalid_cases { + assert!(serde_json::from_value::(invalid).is_err()); + } +} + +#[test] +#[should_panic(expected = "at least one item")] +fn test_dynamic_storage_allocation_empty_items_panics() { + DynamicStorageAllocation::new(vec![], 6); +} + +#[test] +#[should_panic(expected = "zero size")] +fn test_dynamic_storage_allocation_zero_size_panics() { + DynamicStorageAllocation::new(vec![(0, 2, 0)], 6); +} + +#[test] +#[should_panic(expected = "departure")] +fn test_dynamic_storage_allocation_bad_departure_panics() { + DynamicStorageAllocation::new(vec![(3, 2, 1)], 6); +} + +#[test] +fn test_dynamic_storage_allocation_non_overlapping_time_any_address() { + // Two items that don't overlap in time can share any addresses + let problem = DynamicStorageAllocation::new(vec![(0, 2, 3), (2, 4, 3)], 3); + // Both at address 0, but they don't overlap in time (d(a)=2 <= r(a')=2) + assert_eq!(problem.evaluate(&[0, 0]), Or(true)); +} diff --git a/src/unit_tests/models/misc/feasible_register_assignment.rs b/src/unit_tests/models/misc/feasible_register_assignment.rs new file mode 100644 index 00000000..4756a2e6 --- /dev/null +++ b/src/unit_tests/models/misc/feasible_register_assignment.rs @@ -0,0 +1,154 @@ +use super::*; +use crate::solvers::BruteForce; +use crate::traits::Problem; + +#[test] +fn test_feasible_register_assignment_basic() { + let problem = + FeasibleRegisterAssignment::new(4, vec![(0, 1), (0, 2), (1, 3)], 2, vec![0, 1, 0, 0]); + assert_eq!(problem.num_vertices(), 4); + assert_eq!(problem.num_arcs(), 3); + assert_eq!(problem.num_registers(), 2); + assert_eq!(problem.arcs(), &[(0, 1), (0, 2), (1, 3)]); + assert_eq!(problem.assignment(), &[0, 1, 0, 0]); + assert_eq!(problem.dims(), vec![4; 4]); + assert_eq!( + ::NAME, + "FeasibleRegisterAssignment" + ); + assert_eq!(::variant(), vec![]); +} + +#[test] +fn test_feasible_register_assignment_evaluate_valid() { + // 4 vertices: v0 depends on v1 and v2, v1 depends on v3 + // K=2, assignment [0, 1, 0, 0] + // Order: v3(pos0), v1(pos1), v2(pos2), v0(pos3) + // config[v] = position + let problem = + FeasibleRegisterAssignment::new(4, vec![(0, 1), (0, 2), (1, 3)], 2, vec![0, 1, 0, 0]); + let config = vec![3, 1, 2, 0]; + assert!(problem.evaluate(&config)); +} + +#[test] +fn test_feasible_register_assignment_evaluate_invalid_permutation() { + let problem = + FeasibleRegisterAssignment::new(4, vec![(0, 1), (0, 2), (1, 3)], 2, vec![0, 1, 0, 0]); + // Not a permutation: position 0 used twice + assert!(!problem.evaluate(&[0, 0, 1, 2])); + // Wrong length + assert!(!problem.evaluate(&[0, 1, 2])); + assert!(!problem.evaluate(&[0, 1, 2, 3, 4])); + // Position out of range + assert!(!problem.evaluate(&[0, 1, 2, 4])); +} + +#[test] +fn test_feasible_register_assignment_evaluate_invalid_dependency() { + // v0 depends on v1, v1 depends on v3 + let problem = + FeasibleRegisterAssignment::new(4, vec![(0, 1), (0, 2), (1, 3)], 2, vec![0, 1, 0, 0]); + // v0 at position 0 but v1 at position 1 -> v0 evaluated before its dependency v1 + assert!(!problem.evaluate(&[0, 1, 2, 3])); +} + +#[test] +fn test_feasible_register_assignment_register_conflict() { + // Simple case: v0 depends on v1, v2 depends on v1 + // K=2, assignment [0, 0, 0] - all use register 0 + // In any valid topological order, v1 must come first. + // After computing v1 (reg 0), v1 is live until both v0 and v2 are computed. + // Computing v0 or v2 next would need register 0, but v1 is still live there. + let problem = FeasibleRegisterAssignment::new(3, vec![(0, 1), (2, 1)], 2, vec![0, 0, 0]); + // v1 at pos 0, v0 at pos 1, v2 at pos 2 + // After computing v1 (reg 0): v1 is live (v0, v2 still uncomputed) + // Computing v0 (reg 0): conflict! v1 is still live in reg 0 + assert!(!problem.evaluate(&[1, 0, 2])); + + // With different assignment: v1->reg 1, v0->reg 0, v2->reg 0 + let problem2 = FeasibleRegisterAssignment::new(3, vec![(0, 1), (2, 1)], 2, vec![0, 1, 0]); + // v1 at pos 0, v0 at pos 1, v2 at pos 2 + // After computing v1 (reg 1): v1 is live + // Computing v0 (reg 0): no conflict, v0 uses reg 0 + // After v0 is computed, v1's only remaining dependent is v2 + // Computing v2 (reg 0): v1 is still live (v2 not computed yet)... but + // v1 is in reg 1, v2 is in reg 0 => no conflict + assert!(problem2.evaluate(&[1, 0, 2])); +} + +#[test] +fn test_feasible_register_assignment_brute_force() { + let problem = + FeasibleRegisterAssignment::new(4, vec![(0, 1), (0, 2), (1, 3)], 2, vec![0, 1, 0, 0]); + let solver = BruteForce::new(); + let solution = solver + .find_witness(&problem) + .expect("should find a solution"); + assert!(problem.evaluate(&solution)); +} + +#[test] +fn test_feasible_register_assignment_brute_force_all() { + let problem = + FeasibleRegisterAssignment::new(4, vec![(0, 1), (0, 2), (1, 3)], 2, vec![0, 1, 0, 0]); + let solver = BruteForce::new(); + let solutions = solver.find_all_witnesses(&problem); + assert!(!solutions.is_empty()); + for sol in &solutions { + assert!(problem.evaluate(sol)); + } +} + +#[test] +fn test_feasible_register_assignment_unsatisfiable() { + // v0 depends on v1 and v2, v1 depends on v2 + // All assigned to register 0 with K=1 + // v2 must be computed first. v2 is live (v0 and v1 depend on it). + // Next must be v1 (since v0 depends on v1). But v2 is in reg 0 + // and v2 has uncomputed dependent v0 (excluding v1), so v2 is live. + // Computing v1 in reg 0 conflicts with live v2. + let problem = + FeasibleRegisterAssignment::new(3, vec![(0, 1), (0, 2), (1, 2)], 1, vec![0, 0, 0]); + let solver = BruteForce::new(); + assert!(solver.find_witness(&problem).is_none()); +} + +#[test] +fn test_feasible_register_assignment_serialization() { + let problem = + FeasibleRegisterAssignment::new(4, vec![(0, 1), (0, 2), (1, 3)], 2, vec![0, 1, 0, 0]); + let json = serde_json::to_value(&problem).unwrap(); + let restored: FeasibleRegisterAssignment = serde_json::from_value(json).unwrap(); + assert_eq!(restored.num_vertices(), problem.num_vertices()); + assert_eq!(restored.num_arcs(), problem.num_arcs()); + assert_eq!(restored.num_registers(), problem.num_registers()); + assert_eq!(restored.arcs(), problem.arcs()); + assert_eq!(restored.assignment(), problem.assignment()); +} + +#[test] +fn test_feasible_register_assignment_empty() { + let problem = FeasibleRegisterAssignment::new(0, vec![], 0, vec![]); + assert_eq!(problem.num_vertices(), 0); + assert_eq!(problem.dims(), Vec::::new()); + assert!(problem.evaluate(&[])); +} + +#[test] +fn test_feasible_register_assignment_single_vertex() { + let problem = FeasibleRegisterAssignment::new(1, vec![], 1, vec![0]); + assert!(problem.evaluate(&[0])); +} + +#[test] +fn test_feasible_register_assignment_no_dependencies() { + // 3 vertices, no arcs, K=2, assignment [0, 1, 0] + // Any permutation is valid as long as no register conflict. + // v0(reg 0) and v2(reg 0): since there are no dependencies, no vertex is + // ever "live" (no dependents), so no conflicts can arise. + let problem = FeasibleRegisterAssignment::new(3, vec![], 2, vec![0, 1, 0]); + // Any order works since no vertex has dependents => nothing is ever live + assert!(problem.evaluate(&[0, 1, 2])); + assert!(problem.evaluate(&[2, 1, 0])); +} diff --git a/src/unit_tests/models/misc/non_liveness_free_petri_net.rs b/src/unit_tests/models/misc/non_liveness_free_petri_net.rs new file mode 100644 index 00000000..54704ffc --- /dev/null +++ b/src/unit_tests/models/misc/non_liveness_free_petri_net.rs @@ -0,0 +1,184 @@ +use crate::models::misc::NonLivenessFreePetriNet; +use crate::solvers::BruteForce; +use crate::traits::Problem; +use crate::types::Or; + +/// Chain net: t0 moves token s0->s1, t1 moves s1->s2, t2 moves s2->s3, then deadlock. +fn chain_net() -> NonLivenessFreePetriNet { + NonLivenessFreePetriNet::new( + 4, + 3, + vec![(0, 0), (1, 1), (2, 2)], + vec![(0, 1), (1, 2), (2, 3)], + vec![1, 0, 0, 0], + ) +} + +/// Cycle net: token oscillates between two places, both transitions always fireable. +fn cycle_net() -> NonLivenessFreePetriNet { + NonLivenessFreePetriNet::new(2, 2, vec![(0, 0), (1, 1)], vec![(0, 1), (1, 0)], vec![1, 0]) +} + +#[test] +fn test_non_liveness_free_petri_net_basic() { + let problem = chain_net(); + assert_eq!(problem.num_places(), 4); + assert_eq!(problem.num_transitions(), 3); + assert_eq!(problem.num_arcs(), 6); + assert_eq!(problem.initial_token_sum(), 1); + assert_eq!(problem.dims(), vec![2; 3]); + assert_eq!(problem.num_variables(), 3); + assert_eq!( + ::NAME, + "NonLivenessFreePetriNet" + ); + assert_eq!(::variant(), vec![]); +} + +#[test] +fn test_non_liveness_chain_net_is_not_live() { + let problem = chain_net(); + // All transitions are dead: after the chain fires, nothing can fire again. + // Selecting all transitions should yield true. + assert_eq!(problem.evaluate(&[1, 1, 1]), Or(true)); + // Selecting just one transition should also yield true. + assert_eq!(problem.evaluate(&[1, 0, 0]), Or(true)); + assert_eq!(problem.evaluate(&[0, 1, 0]), Or(true)); + assert_eq!(problem.evaluate(&[0, 0, 1]), Or(true)); + // Selecting no transition yields false (no claimed dead transition). + assert_eq!(problem.evaluate(&[0, 0, 0]), Or(false)); +} + +#[test] +fn test_non_liveness_cycle_net_is_live() { + let problem = cycle_net(); + // In the cycle net, both transitions can always fire. No transition is dead. + assert_eq!(problem.evaluate(&[1, 1]), Or(false)); + assert_eq!(problem.evaluate(&[1, 0]), Or(false)); + assert_eq!(problem.evaluate(&[0, 1]), Or(false)); + assert_eq!(problem.evaluate(&[0, 0]), Or(false)); +} + +#[test] +fn test_non_liveness_solver_finds_witness_chain() { + let problem = chain_net(); + let solver = BruteForce::new(); + let witness = solver.find_witness(&problem).unwrap(); + assert_eq!(problem.evaluate(&witness), Or(true)); +} + +#[test] +fn test_non_liveness_solver_no_witness_cycle() { + let problem = cycle_net(); + let solver = BruteForce::new(); + assert!(solver.find_witness(&problem).is_none()); +} + +#[test] +fn test_non_liveness_wrong_config_length() { + let problem = chain_net(); + assert_eq!(problem.evaluate(&[1, 0]), Or(false)); + assert_eq!(problem.evaluate(&[1, 0, 0, 0]), Or(false)); +} + +#[test] +fn test_non_liveness_serialization_round_trip() { + let problem = chain_net(); + let json = serde_json::to_value(&problem).unwrap(); + assert_eq!( + json, + serde_json::json!({ + "num_places": 4, + "num_transitions": 3, + "place_to_transition": [[0, 0], [1, 1], [2, 2]], + "transition_to_place": [[0, 1], [1, 2], [2, 3]], + "initial_marking": [1, 0, 0, 0], + }) + ); + + let restored: NonLivenessFreePetriNet = serde_json::from_value(json).unwrap(); + assert_eq!(restored.num_places(), problem.num_places()); + assert_eq!(restored.num_transitions(), problem.num_transitions()); + assert_eq!( + restored.place_to_transition(), + problem.place_to_transition() + ); + assert_eq!( + restored.transition_to_place(), + problem.transition_to_place() + ); + assert_eq!(restored.initial_marking(), problem.initial_marking()); +} + +#[test] +fn test_non_liveness_deserialization_rejects_invalid() { + let invalid_cases = [ + // Zero places + serde_json::json!({ + "num_places": 0, + "num_transitions": 1, + "place_to_transition": [], + "transition_to_place": [], + "initial_marking": [], + }), + // Zero transitions + serde_json::json!({ + "num_places": 1, + "num_transitions": 0, + "place_to_transition": [], + "transition_to_place": [], + "initial_marking": [0], + }), + // Marking length mismatch + serde_json::json!({ + "num_places": 2, + "num_transitions": 1, + "place_to_transition": [], + "transition_to_place": [], + "initial_marking": [0], + }), + // Place index out of range + serde_json::json!({ + "num_places": 2, + "num_transitions": 1, + "place_to_transition": [[5, 0]], + "transition_to_place": [], + "initial_marking": [0, 0], + }), + ]; + + for invalid in invalid_cases { + assert!(serde_json::from_value::(invalid).is_err()); + } +} + +#[test] +#[should_panic(expected = "at least one place")] +fn test_non_liveness_zero_places_panics() { + NonLivenessFreePetriNet::new(0, 1, vec![], vec![], vec![]); +} + +#[test] +#[should_panic(expected = "at least one transition")] +fn test_non_liveness_zero_transitions_panics() { + NonLivenessFreePetriNet::new(1, 0, vec![], vec![], vec![0]); +} + +#[test] +#[should_panic(expected = "does not match")] +fn test_non_liveness_marking_length_mismatch_panics() { + NonLivenessFreePetriNet::new(2, 1, vec![], vec![], vec![0]); +} + +#[test] +#[should_panic(expected = "Free-choice violation")] +fn test_non_liveness_free_choice_violation_panics() { + // t0 has preset {s0}, t1 has preset {s0, s1} -- they share s0 but have different presets + NonLivenessFreePetriNet::new( + 2, + 2, + vec![(0, 0), (0, 1), (1, 1)], + vec![(0, 0), (1, 1)], + vec![1, 1], + ); +} diff --git a/src/unit_tests/models/misc/numerical_3_dimensional_matching.rs b/src/unit_tests/models/misc/numerical_3_dimensional_matching.rs new file mode 100644 index 00000000..8c9e5d55 --- /dev/null +++ b/src/unit_tests/models/misc/numerical_3_dimensional_matching.rs @@ -0,0 +1,186 @@ +use crate::models::misc::Numerical3DimensionalMatching; +use crate::solvers::BruteForce; +use crate::traits::Problem; +use crate::types::Or; + +fn yes_problem() -> Numerical3DimensionalMatching { + // W=[4,5], X=[4,5], Y=[5,7], B=15, m=2 + // Valid: w0↔x0,y1 (4+4+7=15) and w1↔x1,y0 (5+5+5=15) + Numerical3DimensionalMatching::new(vec![4, 5], vec![4, 5], vec![5, 7], 15) +} + +#[test] +fn test_numerical_3dm_creation() { + let problem = yes_problem(); + assert_eq!(problem.sizes_w(), &[4, 5]); + assert_eq!(problem.sizes_x(), &[4, 5]); + assert_eq!(problem.sizes_y(), &[5, 7]); + assert_eq!(problem.bound(), 15); + assert_eq!(problem.num_groups(), 2); + assert_eq!(problem.dims(), vec![2; 4]); + assert_eq!(problem.num_variables(), 4); + assert_eq!( + ::NAME, + "Numerical3DimensionalMatching" + ); + assert_eq!( + ::variant(), + vec![] + ); +} + +#[test] +fn test_numerical_3dm_evaluate_valid() { + let problem = yes_problem(); + // config [0, 1, 1, 0]: w0↔x0,y1 (4+4+7=15), w1↔x1,y0 (5+5+5=15) + assert_eq!(problem.evaluate(&[0, 1, 1, 0]), Or(true)); +} + +#[test] +fn test_numerical_3dm_evaluate_invalid_sums() { + let problem = yes_problem(); + // config [0, 1, 0, 1]: w0↔x0,y0 (4+4+5=13≠15) + assert_eq!(problem.evaluate(&[0, 1, 0, 1]), Or(false)); + // config [1, 0, 0, 1]: w0↔x1,y0 (4+5+5=14≠15) + assert_eq!(problem.evaluate(&[1, 0, 0, 1]), Or(false)); +} + +#[test] +fn test_numerical_3dm_evaluate_invalid_permutation() { + let problem = yes_problem(); + // Both X assignments point to 0 — not a permutation + assert_eq!(problem.evaluate(&[0, 0, 0, 1]), Or(false)); + // Both Y assignments point to 1 — not a permutation + assert_eq!(problem.evaluate(&[0, 1, 1, 1]), Or(false)); +} + +#[test] +fn test_numerical_3dm_evaluate_wrong_length() { + let problem = yes_problem(); + assert_eq!(problem.evaluate(&[0, 1, 1]), Or(false)); + assert_eq!(problem.evaluate(&[0, 1, 1, 0, 0]), Or(false)); +} + +#[test] +fn test_numerical_3dm_evaluate_out_of_range() { + let problem = yes_problem(); + // Index 2 is out of range for m=2 + assert_eq!(problem.evaluate(&[0, 2, 1, 0]), Or(false)); +} + +#[test] +fn test_numerical_3dm_solver_finds_witness() { + let problem = yes_problem(); + let solver = BruteForce::new(); + let solution = solver.find_witness(&problem).unwrap(); + assert_eq!(problem.evaluate(&solution), Or(true)); +} + +#[test] +fn test_numerical_3dm_solver_unsatisfiable() { + // W=[4,5], X=[4,6], Y=[5,7], B=15 — total=31≠30, invalid + // Need a valid instance that is unsatisfiable. + // W=[4,5], X=[4,5], Y=[4,7], B=15 — total=4+5+4+5+4+7=29≠30, invalid. + // W=[4,6], X=[4,6], Y=[4,6], B=15 — total=4+6+4+6+4+6=30=2*15, all between 3.75 and 7.5. + // Check: w0↔x0,y0: 4+4+4=12≠15; w0↔x0,y1: 4+4+6=14≠15; w0↔x1,y0: 4+6+4=14≠15; w0↔x1,y1: 4+6+6=16≠15 + // No valid matching exists! + let problem = Numerical3DimensionalMatching::new(vec![4, 6], vec![4, 6], vec![4, 6], 15); + let solver = BruteForce::new(); + assert!(solver.find_witness(&problem).is_none()); +} + +#[test] +fn test_numerical_3dm_serialization_round_trip() { + let problem = yes_problem(); + let json = serde_json::to_value(&problem).unwrap(); + assert_eq!( + json, + serde_json::json!({ + "sizes_w": [4, 5], + "sizes_x": [4, 5], + "sizes_y": [5, 7], + "bound": 15, + }) + ); + + let restored: Numerical3DimensionalMatching = serde_json::from_value(json).unwrap(); + assert_eq!(restored.sizes_w(), problem.sizes_w()); + assert_eq!(restored.sizes_x(), problem.sizes_x()); + assert_eq!(restored.sizes_y(), problem.sizes_y()); + assert_eq!(restored.bound(), problem.bound()); +} + +#[test] +fn test_numerical_3dm_deserialization_rejects_invalid() { + let invalid_cases = [ + // Empty sets + serde_json::json!({ + "sizes_w": [], + "sizes_x": [], + "sizes_y": [], + "bound": 15, + }), + // Different set sizes + serde_json::json!({ + "sizes_w": [4, 5], + "sizes_x": [4], + "sizes_y": [5, 7], + "bound": 15, + }), + // Zero size + serde_json::json!({ + "sizes_w": [0, 5], + "sizes_x": [4, 5], + "sizes_y": [5, 7], + "bound": 15, + }), + // Size outside B/4..B/2 range + serde_json::json!({ + "sizes_w": [3, 5], + "sizes_x": [4, 5], + "sizes_y": [5, 8], + "bound": 15, + }), + // Wrong total sum + serde_json::json!({ + "sizes_w": [4, 5], + "sizes_x": [4, 5], + "sizes_y": [5, 7], + "bound": 14, + }), + ]; + + for invalid in invalid_cases { + assert!(serde_json::from_value::(invalid).is_err()); + } +} + +#[test] +#[should_panic(expected = "at least one element")] +fn test_numerical_3dm_empty_sets_panics() { + Numerical3DimensionalMatching::new(vec![], vec![], vec![], 15); +} + +#[test] +#[should_panic(expected = "same size")] +fn test_numerical_3dm_mismatched_sizes_panics() { + Numerical3DimensionalMatching::new(vec![4, 5], vec![4], vec![5, 7], 15); +} + +#[test] +#[should_panic(expected = "positive")] +fn test_numerical_3dm_zero_size_panics() { + Numerical3DimensionalMatching::new(vec![0, 5], vec![4, 5], vec![5, 7], 15); +} + +#[test] +#[should_panic(expected = "strictly between")] +fn test_numerical_3dm_size_outside_bounds_panics() { + Numerical3DimensionalMatching::new(vec![3, 5], vec![4, 5], vec![5, 8], 15); +} + +#[test] +#[should_panic(expected = "must equal m * bound")] +fn test_numerical_3dm_wrong_total_sum_panics() { + Numerical3DimensionalMatching::new(vec![4, 5], vec![4, 5], vec![5, 6], 14); +} diff --git a/src/unit_tests/models/misc/subset_product.rs b/src/unit_tests/models/misc/subset_product.rs new file mode 100644 index 00000000..1832e718 --- /dev/null +++ b/src/unit_tests/models/misc/subset_product.rs @@ -0,0 +1,176 @@ +use super::*; +use crate::solvers::BruteForce; +use crate::traits::Problem; +use num_bigint::BigUint; + +fn bu(n: u32) -> BigUint { + BigUint::from(n) +} + +fn buv(values: &[u32]) -> Vec { + values.iter().copied().map(BigUint::from).collect() +} + +#[test] +fn test_subsetproduct_basic() { + let problem = SubsetProduct::new(vec![2u32, 3, 5, 7, 6, 10], 210u32); + assert_eq!(problem.num_elements(), 6); + assert_eq!(problem.sizes(), buv(&[2, 3, 5, 7, 6, 10]).as_slice()); + assert_eq!(problem.target(), &bu(210)); + assert_eq!(problem.dims(), vec![2; 6]); + assert_eq!(::NAME, "SubsetProduct"); + assert_eq!(::variant(), vec![]); +} + +#[test] +fn test_subsetproduct_evaluate_satisfying() { + let problem = SubsetProduct::new(vec![2u32, 3, 5, 7, 6, 10], 210u32); + // {2, 3, 5, 7} = 210 + assert!(problem.evaluate(&[1, 1, 1, 1, 0, 0])); + // {3, 7, 10} = 210 + assert!(problem.evaluate(&[0, 1, 0, 1, 0, 1])); +} + +#[test] +fn test_subsetproduct_evaluate_unsatisfying() { + let problem = SubsetProduct::new(vec![2u32, 3, 5, 7, 6, 10], 210u32); + // {2, 3} = 6 != 210 + assert!(!problem.evaluate(&[1, 1, 0, 0, 0, 0])); + // empty = 1 != 210 + assert!(!problem.evaluate(&[0, 0, 0, 0, 0, 0])); + // all = 2*3*5*7*6*10 = 12600 != 210 + assert!(!problem.evaluate(&[1, 1, 1, 1, 1, 1])); +} + +#[test] +fn test_subsetproduct_evaluate_wrong_config_length() { + let problem = SubsetProduct::new(vec![2u32, 3, 5], 30u32); + assert!(!problem.evaluate(&[1, 0])); + assert!(!problem.evaluate(&[1, 0, 0, 0])); +} + +#[test] +fn test_subsetproduct_evaluate_invalid_variable_value() { + let problem = SubsetProduct::new(vec![2u32, 3], 6u32); + assert!(!problem.evaluate(&[2, 0])); +} + +#[test] +fn test_subsetproduct_empty_instance() { + // Empty set, target 1: empty subset product = 1 satisfies + let problem = SubsetProduct::new_unchecked(vec![], bu(1)); + assert_eq!(problem.num_elements(), 0); + assert_eq!(problem.dims(), Vec::::new()); + assert!(problem.evaluate(&[])); +} + +#[test] +fn test_subsetproduct_empty_instance_nonunit_target() { + // Empty set, target 5: impossible (empty product = 1) + let problem = SubsetProduct::new_unchecked(vec![], bu(5)); + assert!(!problem.evaluate(&[])); +} + +#[test] +fn test_subsetproduct_brute_force() { + let problem = SubsetProduct::new(vec![2u32, 3, 5, 7, 6, 10], 210u32); + let solver = BruteForce::new(); + let solution = solver + .find_witness(&problem) + .expect("should find a solution"); + assert!(problem.evaluate(&solution)); +} + +#[test] +fn test_subsetproduct_brute_force_all() { + let problem = SubsetProduct::new(vec![2u32, 3, 5, 7, 6, 10], 210u32); + let solver = BruteForce::new(); + let solutions = solver.find_all_witnesses(&problem); + assert!(!solutions.is_empty()); + for sol in &solutions { + assert!(problem.evaluate(sol)); + } +} + +#[test] +fn test_subsetproduct_unsatisfiable() { + // Target 1000 is unreachable with these sizes + let problem = SubsetProduct::new(vec![2u32, 3, 5], 1000u32); + let solver = BruteForce::new(); + let solution = solver.find_witness(&problem); + assert!(solution.is_none()); +} + +#[test] +fn test_subsetproduct_serialization() { + let problem = SubsetProduct::new(vec![2u32, 3, 5, 7, 6, 10], 210u32); + let json = serde_json::to_value(&problem).unwrap(); + assert_eq!( + json, + serde_json::json!({ + "sizes": ["2", "3", "5", "7", "6", "10"], + "target": "210", + }) + ); + let restored: SubsetProduct = serde_json::from_value(json).unwrap(); + assert_eq!(restored.sizes(), problem.sizes()); + assert_eq!(restored.target(), problem.target()); +} + +#[test] +fn test_subsetproduct_deserialization_legacy_numeric_json() { + let restored: SubsetProduct = serde_json::from_value(serde_json::json!({ + "sizes": [2, 3, 5, 7, 6, 10], + "target": 210, + })) + .unwrap(); + assert_eq!(restored.sizes(), buv(&[2, 3, 5, 7, 6, 10]).as_slice()); + assert_eq!(restored.target(), &bu(210)); +} + +#[test] +fn test_subsetproduct_single_element() { + let problem = SubsetProduct::new(vec![5u32], 5u32); + assert!(problem.evaluate(&[1])); + assert!(!problem.evaluate(&[0])); +} + +#[test] +fn test_subsetproduct_all_selected() { + // Target equals product of all elements + let problem = SubsetProduct::new(vec![2u32, 3, 5], 30u32); + assert!(problem.evaluate(&[1, 1, 1])); // 2*3*5 = 30 +} + +#[test] +fn test_subsetproduct_target_one() { + // Target 1 with non-empty set: only empty subset works (product = 1) + let problem = SubsetProduct::new(vec![2u32, 3, 5], 1u32); + assert!(problem.evaluate(&[0, 0, 0])); // empty subset product = 1 + assert!(!problem.evaluate(&[1, 0, 0])); // 2 != 1 +} + +#[test] +#[should_panic(expected = "positive")] +fn test_subsetproduct_negative_sizes_panic() { + SubsetProduct::new(vec![-1i64, 2, 3], 4u32); +} + +#[test] +#[should_panic(expected = "positive")] +fn test_subsetproduct_zero_size_panic() { + SubsetProduct::new(vec![0i64, 2, 3], 4u32); +} + +#[test] +#[should_panic(expected = "positive")] +fn test_subsetproduct_zero_target_panic() { + SubsetProduct::new(vec![2u32, 3], 0u32); +} + +#[test] +fn test_subsetproduct_large_integer_input() { + let problem = SubsetProduct::new(vec![2i128, 3, 5, 7, 6, 10], 210i128); + assert!(problem.evaluate(&[1, 1, 1, 1, 0, 0])); // 2*3*5*7 = 210 + assert!(!problem.evaluate(&[1, 1, 0, 0, 0, 0])); // 2*3 = 6 +} diff --git a/src/unit_tests/models/set/three_dimensional_matching.rs b/src/unit_tests/models/set/three_dimensional_matching.rs new file mode 100644 index 00000000..b7c31d42 --- /dev/null +++ b/src/unit_tests/models/set/three_dimensional_matching.rs @@ -0,0 +1,136 @@ +use super::*; +use crate::solvers::BruteForce; +use crate::traits::Problem; + +#[test] +fn test_three_dimensional_matching_creation() { + let problem = ThreeDimensionalMatching::new( + 3, + vec![(0, 1, 2), (1, 0, 1), (2, 2, 0), (0, 0, 0), (1, 2, 2)], + ); + assert_eq!(problem.universe_size(), 3); + assert_eq!(problem.num_triples(), 5); + assert_eq!(problem.num_variables(), 5); + assert_eq!(problem.dims(), vec![2, 2, 2, 2, 2]); +} + +#[test] +fn test_three_dimensional_matching_evaluation() { + // q = 3, W = X = Y = {0, 1, 2} + // T0=(0,1,2), T1=(1,0,1), T2=(2,2,0), T3=(0,0,0), T4=(1,2,2) + let problem = ThreeDimensionalMatching::new( + 3, + vec![(0, 1, 2), (1, 0, 1), (2, 2, 0), (0, 0, 0), (1, 2, 2)], + ); + + // T0, T1, T2: W={0,1,2} distinct, X={1,0,2} distinct, Y={2,1,0} distinct -> valid + assert!(problem.evaluate(&[1, 1, 1, 0, 0])); + + // T0, T3: both have w=0 -> invalid (also only 2 selected, need 3) + assert!(!problem.evaluate(&[1, 0, 0, 1, 0])); + + // T0, T1, T3: w-coordinates {0,1,0} not distinct -> invalid + assert!(!problem.evaluate(&[1, 1, 0, 1, 0])); + + // Only T0 selected (need q=3 triples) + assert!(!problem.evaluate(&[1, 0, 0, 0, 0])); + + // All selected (too many) + assert!(!problem.evaluate(&[1, 1, 1, 1, 1])); + + // None selected + assert!(!problem.evaluate(&[0, 0, 0, 0, 0])); +} + +#[test] +fn test_three_dimensional_matching_solver() { + let problem = ThreeDimensionalMatching::new( + 3, + vec![(0, 1, 2), (1, 0, 1), (2, 2, 0), (0, 0, 0), (1, 2, 2)], + ); + + let solver = BruteForce::new(); + let solutions = solver.find_all_witnesses(&problem); + + assert!(!solutions.is_empty()); + for sol in &solutions { + assert!(problem.evaluate(sol)); + } + // Verify the known solution is in there + assert!(solutions.contains(&vec![1, 1, 1, 0, 0])); +} + +#[test] +fn test_three_dimensional_matching_no_solution() { + // q = 2, all triples share w=0 -> no matching of size 2 possible + let problem = ThreeDimensionalMatching::new(2, vec![(0, 0, 0), (0, 1, 1), (0, 0, 1)]); + + let solver = BruteForce::new(); + let solutions = solver.find_all_witnesses(&problem); + assert!(solutions.is_empty()); +} + +#[test] +fn test_three_dimensional_matching_serialization() { + let problem = ThreeDimensionalMatching::new(2, vec![(0, 1, 0), (1, 0, 1)]); + let json = serde_json::to_string(&problem).unwrap(); + let deserialized: ThreeDimensionalMatching = serde_json::from_str(&json).unwrap(); + assert_eq!(deserialized.universe_size(), problem.universe_size()); + assert_eq!(deserialized.num_triples(), problem.num_triples()); + assert_eq!(deserialized.triples(), problem.triples()); +} + +#[test] +fn test_three_dimensional_matching_empty() { + // q = 0: trivially satisfiable + let problem = ThreeDimensionalMatching::new(0, vec![]); + assert!(problem.evaluate(&[])); + let solver = BruteForce::new(); + let solutions = solver.find_all_witnesses(&problem); + assert_eq!(solutions, vec![Vec::::new()]); +} + +#[test] +fn test_three_dimensional_matching_get_triple() { + let problem = ThreeDimensionalMatching::new(2, vec![(0, 1, 0), (1, 0, 1)]); + assert_eq!(problem.get_triple(0), Some(&(0, 1, 0))); + assert_eq!(problem.get_triple(1), Some(&(1, 0, 1))); + assert_eq!(problem.get_triple(2), None); +} + +#[test] +fn test_three_dimensional_matching_rejects_wrong_config_length() { + let problem = ThreeDimensionalMatching::new(2, vec![(0, 1, 0), (1, 0, 1)]); + assert!(!problem.evaluate(&[1, 1, 0])); +} + +#[test] +fn test_three_dimensional_matching_rejects_non_binary_config_values() { + let problem = ThreeDimensionalMatching::new(2, vec![(0, 1, 0), (1, 0, 1)]); + assert!(!problem.evaluate(&[1, 2])); +} + +#[test] +#[should_panic(expected = "outside 0..")] +fn test_three_dimensional_matching_element_out_of_range() { + ThreeDimensionalMatching::new(2, vec![(0, 3, 0)]); +} + +#[test] +fn test_three_dimensional_matching_is_valid_solution() { + let problem = ThreeDimensionalMatching::new(2, vec![(0, 1, 0), (1, 0, 1)]); + assert!(problem.is_valid_solution(&[1, 1])); + assert!(!problem.is_valid_solution(&[1, 0])); +} + +#[test] +fn test_three_dimensional_matching_duplicate_coordinates() { + // q = 2, T0=(0,0,0), T1=(1,1,1), T2=(0,1,0) + // T0+T1 is valid matching; T0+T2 shares w=0; T1+T2 shares y (not y, T1 y=1, T2 y=0, ok) + // Actually T1+T2: w={1,0} ok, x={1,1} NOT distinct -> invalid + let problem = ThreeDimensionalMatching::new(2, vec![(0, 0, 0), (1, 1, 1), (0, 1, 0)]); + + assert!(problem.evaluate(&[1, 1, 0])); // T0+T1: w={0,1}, x={0,1}, y={0,1} all distinct + assert!(!problem.evaluate(&[1, 0, 1])); // T0+T2: w={0,0} not distinct + assert!(!problem.evaluate(&[0, 1, 1])); // T1+T2: x={1,1} not distinct +} From 77a280011e03945ed7814e7e37cd473ebb55dd57 Mon Sep 17 00:00:00 2001 From: Xiwei Pan Date: Tue, 31 Mar 2026 02:53:23 +0800 Subject: [PATCH 18/21] fix: resolve clippy needless_range_loop in MinimumWeightSolutionToLinearEquations Co-Authored-By: Claude Opus 4.6 (1M context) --- .../minimum_weight_solution_to_linear_equations.rs | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/src/models/algebraic/minimum_weight_solution_to_linear_equations.rs b/src/models/algebraic/minimum_weight_solution_to_linear_equations.rs index 138425fb..5457a183 100644 --- a/src/models/algebraic/minimum_weight_solution_to_linear_equations.rs +++ b/src/models/algebraic/minimum_weight_solution_to_linear_equations.rs @@ -131,18 +131,19 @@ impl MinimumWeightSolutionToLinearEquations { aug.swap(pivot_row, swap_row); let pivot_val = aug[pivot_row][col]; + let pivot_row_snapshot = aug[pivot_row].clone(); // Eliminate all other rows. - for r in 0..n { + for (r, row) in aug.iter_mut().enumerate() { if r == pivot_row { continue; } - let factor = aug[r][col]; + let factor = row[col]; if factor == 0 { continue; } // row[r] = pivot_val * row[r] - factor * row[pivot_row] - for c in 0..k + 1 { - aug[r][c] = pivot_val * aug[r][c] - factor * aug[pivot_row][c]; + for (cell, &pv) in row.iter_mut().zip(pivot_row_snapshot.iter()) { + *cell = pivot_val * *cell - factor * pv; } } pivot_row += 1; From 57eb73183f9b4ca161926f6de19a66bfc0561560 Mon Sep 17 00:00:00 2001 From: Xiwei Pan Date: Tue, 31 Mar 2026 12:28:47 +0800 Subject: [PATCH 19/21] fix: code quality improvements for batch models - Fix feasible_register_assignment: reject num_registers==0 with vertices, correct complexity from num_vertices^2*2^num_vertices to factorial(num_vertices) - Fix numerical_3_dimensional_matching complexity: 3^num_groups -> num_groups^(2*num_groups) - Fix subset_product complexity: 2^(num_elements/2) -> 2^num_elements - Extract duplicated config_to_assignment to shared formula/mod.rs utility - Use is_disjoint() in minimum_intersection_graph_basis - Remove redundant is_valid_solution from ThreeDimensionalMatching - Gate SubsetProduct::new_unchecked with #[cfg(test)] Co-Authored-By: Claude Opus 4.6 (1M context) --- src/models/formula/ksat.rs | 7 +------ src/models/formula/mod.rs | 5 +++++ src/models/formula/nae_satisfiability.rs | 6 +----- src/models/formula/non_tautology.rs | 7 +------ src/models/formula/one_in_three_satisfiability.rs | 7 +------ src/models/formula/planar_3_satisfiability.rs | 7 +------ src/models/formula/sat.rs | 7 +------ src/models/graph/minimum_intersection_graph_basis.rs | 6 ++---- src/models/misc/feasible_register_assignment.rs | 10 ++++++++-- src/models/misc/numerical_3_dimensional_matching.rs | 2 +- src/models/misc/subset_product.rs | 9 +++------ src/models/set/three_dimensional_matching.rs | 9 --------- .../models/set/three_dimensional_matching.rs | 4 ++-- 13 files changed, 27 insertions(+), 59 deletions(-) diff --git a/src/models/formula/ksat.rs b/src/models/formula/ksat.rs index 047c902e..75aac7a5 100644 --- a/src/models/formula/ksat.rs +++ b/src/models/formula/ksat.rs @@ -159,11 +159,6 @@ impl KSatisfiability { pub fn is_satisfying(&self, assignment: &[bool]) -> bool { self.clauses.iter().all(|c| c.is_satisfied(assignment)) } - - /// Convert a usize config to boolean assignment. - fn config_to_assignment(config: &[usize]) -> Vec { - config.iter().map(|&v| v == 1).collect() - } } impl Problem for KSatisfiability { @@ -176,7 +171,7 @@ impl Problem for KSatisfiability { fn evaluate(&self, config: &[usize]) -> crate::types::Or { crate::types::Or({ - let assignment = Self::config_to_assignment(config); + let assignment = super::config_to_assignment(config); self.is_satisfying(&assignment) }) } diff --git a/src/models/formula/mod.rs b/src/models/formula/mod.rs index 47a93645..f78f2d7e 100644 --- a/src/models/formula/mod.rs +++ b/src/models/formula/mod.rs @@ -28,6 +28,11 @@ pub use planar_3_satisfiability::Planar3Satisfiability; pub use qbf::{QuantifiedBooleanFormulas, Quantifier}; pub use sat::{CNFClause, Satisfiability}; +/// Convert a binary config (0/1 per variable) to a boolean assignment. +pub(crate) fn config_to_assignment(config: &[usize]) -> Vec { + config.iter().map(|&v| v == 1).collect() +} + #[cfg(feature = "example-db")] pub(crate) fn canonical_model_example_specs() -> Vec { let mut specs = Vec::new(); diff --git a/src/models/formula/nae_satisfiability.rs b/src/models/formula/nae_satisfiability.rs index cbc53b32..c65a6b31 100644 --- a/src/models/formula/nae_satisfiability.rs +++ b/src/models/formula/nae_satisfiability.rs @@ -99,10 +99,6 @@ impl NAESatisfiability { self.evaluate(config).0 } - fn config_to_assignment(config: &[usize]) -> Vec { - config.iter().map(|&v| v == 1).collect() - } - fn literal_value(lit: i32, assignment: &[bool]) -> bool { let var = lit.unsigned_abs() as usize - 1; let value = assignment.get(var).copied().unwrap_or(false); @@ -143,7 +139,7 @@ impl Problem for NAESatisfiability { fn evaluate(&self, config: &[usize]) -> crate::types::Or { crate::types::Or({ - let assignment = Self::config_to_assignment(config); + let assignment = super::config_to_assignment(config); self.is_nae_satisfying(&assignment) }) } diff --git a/src/models/formula/non_tautology.rs b/src/models/formula/non_tautology.rs index c5385eaa..7e983cfb 100644 --- a/src/models/formula/non_tautology.rs +++ b/src/models/formula/non_tautology.rs @@ -123,11 +123,6 @@ impl NonTautology { .all(|&lit| Self::literal_is_true(lit, assignment)) }) } - - /// Convert a usize config to boolean assignment. - fn config_to_assignment(config: &[usize]) -> Vec { - config.iter().map(|&v| v == 1).collect() - } } impl Problem for NonTautology { @@ -140,7 +135,7 @@ impl Problem for NonTautology { fn evaluate(&self, config: &[usize]) -> crate::types::Or { crate::types::Or({ - let assignment = Self::config_to_assignment(config); + let assignment = super::config_to_assignment(config); self.is_falsifying(&assignment) }) } diff --git a/src/models/formula/one_in_three_satisfiability.rs b/src/models/formula/one_in_three_satisfiability.rs index 7feb35ad..8b5453ee 100644 --- a/src/models/formula/one_in_three_satisfiability.rs +++ b/src/models/formula/one_in_three_satisfiability.rs @@ -130,11 +130,6 @@ impl OneInThreeSatisfiability { true_count == 1 }) } - - /// Convert a usize config to boolean assignment. - fn config_to_assignment(config: &[usize]) -> Vec { - config.iter().map(|&v| v == 1).collect() - } } impl Problem for OneInThreeSatisfiability { @@ -147,7 +142,7 @@ impl Problem for OneInThreeSatisfiability { fn evaluate(&self, config: &[usize]) -> crate::types::Or { crate::types::Or({ - let assignment = Self::config_to_assignment(config); + let assignment = super::config_to_assignment(config); self.is_one_in_three_satisfying(&assignment) }) } diff --git a/src/models/formula/planar_3_satisfiability.rs b/src/models/formula/planar_3_satisfiability.rs index a3992f54..b3b91871 100644 --- a/src/models/formula/planar_3_satisfiability.rs +++ b/src/models/formula/planar_3_satisfiability.rs @@ -126,11 +126,6 @@ impl Planar3Satisfiability { pub fn is_satisfying(&self, assignment: &[bool]) -> bool { self.clauses.iter().all(|c| c.is_satisfied(assignment)) } - - /// Convert a usize config to boolean assignment. - fn config_to_assignment(config: &[usize]) -> Vec { - config.iter().map(|&v| v == 1).collect() - } } impl Problem for Planar3Satisfiability { @@ -143,7 +138,7 @@ impl Problem for Planar3Satisfiability { fn evaluate(&self, config: &[usize]) -> crate::types::Or { crate::types::Or({ - let assignment = Self::config_to_assignment(config); + let assignment = super::config_to_assignment(config); self.is_satisfying(&assignment) }) } diff --git a/src/models/formula/sat.rs b/src/models/formula/sat.rs index d84f3594..8be2e2b9 100644 --- a/src/models/formula/sat.rs +++ b/src/models/formula/sat.rs @@ -171,11 +171,6 @@ impl Satisfiability { pub fn is_valid_solution(&self, config: &[usize]) -> bool { self.evaluate(config).0 } - - /// Convert a usize config to boolean assignment. - fn config_to_assignment(config: &[usize]) -> Vec { - config.iter().map(|&v| v == 1).collect() - } } impl Problem for Satisfiability { @@ -188,7 +183,7 @@ impl Problem for Satisfiability { fn evaluate(&self, config: &[usize]) -> crate::types::Or { crate::types::Or({ - let assignment = Self::config_to_assignment(config); + let assignment = super::config_to_assignment(config); self.is_satisfying(&assignment) }) } diff --git a/src/models/graph/minimum_intersection_graph_basis.rs b/src/models/graph/minimum_intersection_graph_basis.rs index ec1792a1..d7879596 100644 --- a/src/models/graph/minimum_intersection_graph_basis.rs +++ b/src/models/graph/minimum_intersection_graph_basis.rs @@ -136,7 +136,7 @@ where // Check edge constraints: for every edge (u, v), S[u] ∩ S[v] ≠ ∅ let edges = self.graph.edges(); for &(u, v) in &edges { - if subsets[u].intersection(&subsets[v]).next().is_none() { + if subsets[u].is_disjoint(&subsets[v]) { return Min(None); } } @@ -144,9 +144,7 @@ where // Check non-edge constraints: for every non-edge pair (u, v), S[u] ∩ S[v] = ∅ for u in 0..n { for v in (u + 1)..n { - if !self.graph.has_edge(u, v) - && subsets[u].intersection(&subsets[v]).next().is_some() - { + if !self.graph.has_edge(u, v) && !subsets[u].is_disjoint(&subsets[v]) { return Min(None); } } diff --git a/src/models/misc/feasible_register_assignment.rs b/src/models/misc/feasible_register_assignment.rs index 651b5ec3..105ca697 100644 --- a/src/models/misc/feasible_register_assignment.rs +++ b/src/models/misc/feasible_register_assignment.rs @@ -100,9 +100,15 @@ impl FeasibleRegisterAssignment { assignment.len(), num_vertices ); + if num_vertices > 0 { + assert!( + num_registers > 0, + "num_registers must be positive when there are vertices" + ); + } for (v, &r) in assignment.iter().enumerate() { assert!( - r < num_registers || num_registers == 0, + r < num_registers, "Assignment[{}] = {} is out of bounds for {} registers", v, r, @@ -229,7 +235,7 @@ impl Problem for FeasibleRegisterAssignment { } crate::declare_variants! { - default FeasibleRegisterAssignment => "num_vertices ^ 2 * 2 ^ num_vertices", + default FeasibleRegisterAssignment => "factorial(num_vertices)", } #[cfg(feature = "example-db")] diff --git a/src/models/misc/numerical_3_dimensional_matching.rs b/src/models/misc/numerical_3_dimensional_matching.rs index c657f513..cb436364 100644 --- a/src/models/misc/numerical_3_dimensional_matching.rs +++ b/src/models/misc/numerical_3_dimensional_matching.rs @@ -212,7 +212,7 @@ impl Problem for Numerical3DimensionalMatching { } crate::declare_variants! { - default Numerical3DimensionalMatching => "3^num_groups", + default Numerical3DimensionalMatching => "num_groups^(2 * num_groups)", } #[cfg(feature = "example-db")] diff --git a/src/models/misc/subset_product.rs b/src/models/misc/subset_product.rs index 75bc9c97..5d558f5c 100644 --- a/src/models/misc/subset_product.rs +++ b/src/models/misc/subset_product.rs @@ -83,11 +83,8 @@ impl SubsetProduct { Self { sizes, target } } - /// Create a new SubsetProduct instance without validating sizes. - /// - /// This is intended for reductions that produce SubsetProduct instances - /// where positivity is guaranteed by construction. - #[allow(dead_code)] + /// Create a SubsetProduct without validating sizes (for testing edge cases). + #[cfg(test)] pub(crate) fn new_unchecked(sizes: Vec, target: BigUint) -> Self { Self { sizes, target } } @@ -140,7 +137,7 @@ impl Problem for SubsetProduct { } crate::declare_variants! { - default SubsetProduct => "2^(num_elements / 2)", + default SubsetProduct => "2^num_elements", } mod decimal_biguint { diff --git a/src/models/set/three_dimensional_matching.rs b/src/models/set/three_dimensional_matching.rs index 8b9a9e2e..fcad3854 100644 --- a/src/models/set/three_dimensional_matching.rs +++ b/src/models/set/three_dimensional_matching.rs @@ -117,15 +117,6 @@ impl ThreeDimensionalMatching { pub fn get_triple(&self, index: usize) -> Option<&(usize, usize, usize)> { self.triples.get(index) } - - /// Check if a configuration is a valid three-dimensional matching. - /// - /// A valid matching selects exactly q triples where all W-coordinates - /// are distinct, all X-coordinates are distinct, and all Y-coordinates - /// are distinct. - pub fn is_valid_solution(&self, config: &[usize]) -> bool { - self.evaluate(config).0 - } } impl Problem for ThreeDimensionalMatching { diff --git a/src/unit_tests/models/set/three_dimensional_matching.rs b/src/unit_tests/models/set/three_dimensional_matching.rs index b7c31d42..22b15de3 100644 --- a/src/unit_tests/models/set/three_dimensional_matching.rs +++ b/src/unit_tests/models/set/three_dimensional_matching.rs @@ -119,8 +119,8 @@ fn test_three_dimensional_matching_element_out_of_range() { #[test] fn test_three_dimensional_matching_is_valid_solution() { let problem = ThreeDimensionalMatching::new(2, vec![(0, 1, 0), (1, 0, 1)]); - assert!(problem.is_valid_solution(&[1, 1])); - assert!(!problem.is_valid_solution(&[1, 0])); + assert!(problem.evaluate(&[1, 1]).0); + assert!(!problem.evaluate(&[1, 0]).0); } #[test] From 106044775e4bb112279c66051732b3fc1fee7c70 Mon Sep 17 00:00:00 2001 From: Xiwei Pan Date: Tue, 31 Mar 2026 12:40:51 +0800 Subject: [PATCH 20/21] refactor: improve code quality across batch models - Cache globally_dead in NonLivenessFreePetriNet constructor (was recomputed on every evaluate() call) - Precompute dependencies/dependents in FeasibleRegisterAssignment constructor (was rebuilt on every is_feasible() call) - Remove dead has_input vector in NonLivenessFreePetriNet::enabled_transitions - Extract shared BigUint serde modules from SubsetSum/SubsetProduct into biguint_serde.rs (was duplicated verbatim in both files) - Add early exit in SubsetProduct::evaluate when product exceeds target - Single-pass grouping in MinimumCoveringByCliques::is_valid_cover (was O(max_group * num_edges), now O(num_edges)) Co-Authored-By: Claude Opus 4.6 (1M context) --- .../graph/minimum_covering_by_cliques.rs | 24 +++---- src/models/misc/biguint_serde.rs | 63 ++++++++++++++++ .../misc/feasible_register_assignment.rs | 72 ++++++++++++++----- src/models/misc/mod.rs | 1 + .../misc/non_liveness_free_petri_net.rs | 22 +++--- src/models/misc/subset_product.rs | 69 ++---------------- src/models/misc/subset_sum.rs | 66 +---------------- 7 files changed, 147 insertions(+), 170 deletions(-) create mode 100644 src/models/misc/biguint_serde.rs diff --git a/src/models/graph/minimum_covering_by_cliques.rs b/src/models/graph/minimum_covering_by_cliques.rs index bda7f3cd..55080af3 100644 --- a/src/models/graph/minimum_covering_by_cliques.rs +++ b/src/models/graph/minimum_covering_by_cliques.rs @@ -94,24 +94,22 @@ impl MinimumCoveringByCliques { return false; } - // Collect vertices per group and check clique property. + // Group edges by their assigned clique in a single pass. let max_group = match config.iter().max() { Some(&m) => m, None => return true, // no edges → trivially valid }; - for group in 0..=max_group { - let vertices: HashSet = config - .iter() - .enumerate() - .filter(|(_, &g)| g == group) - .flat_map(|(idx, _)| { - let (u, v) = edges[idx]; - [u, v] - }) - .collect(); - - let verts: Vec = vertices.into_iter().collect(); + let mut groups: Vec> = vec![HashSet::new(); max_group + 1]; + for (idx, &group) in config.iter().enumerate() { + let (u, v) = edges[idx]; + groups[group].insert(u); + groups[group].insert(v); + } + + // Check that each group's vertices form a clique. + for vertices in &groups { + let verts: Vec = vertices.iter().copied().collect(); for i in 0..verts.len() { for j in (i + 1)..verts.len() { if !self.graph.has_edge(verts[i], verts[j]) { diff --git a/src/models/misc/biguint_serde.rs b/src/models/misc/biguint_serde.rs new file mode 100644 index 00000000..0911144d --- /dev/null +++ b/src/models/misc/biguint_serde.rs @@ -0,0 +1,63 @@ +//! Shared serde helpers for `BigUint` fields (decimal string representation). + +pub(crate) mod decimal_biguint { + use num_bigint::BigUint; + use serde::de::Error; + use serde::{Deserialize, Deserializer, Serializer}; + + #[derive(Deserialize)] + #[serde(untagged)] + pub enum Repr { + String(String), + U64(u64), + I64(i64), + } + + pub fn parse_repr(value: Repr) -> Result { + match value { + Repr::String(s) => BigUint::parse_bytes(s.as_bytes(), 10) + .ok_or_else(|| E::custom(format!("invalid decimal integer: {s}"))), + Repr::U64(n) => Ok(BigUint::from(n)), + Repr::I64(n) if n >= 0 => Ok(BigUint::from(n as u64)), + Repr::I64(n) => Err(E::custom(format!("expected nonnegative integer, got {n}"))), + } + } + + pub fn serialize(value: &BigUint, serializer: S) -> Result + where + S: Serializer, + { + serializer.serialize_str(&value.to_str_radix(10)) + } + + pub fn deserialize<'de, D>(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + parse_repr(Repr::deserialize(deserializer)?) + } +} + +pub(crate) mod decimal_biguint_vec { + use num_bigint::BigUint; + use serde::{Deserialize, Deserializer, Serialize, Serializer}; + + pub fn serialize(values: &[BigUint], serializer: S) -> Result + where + S: Serializer, + { + let strings: Vec = values.iter().map(ToString::to_string).collect(); + strings.serialize(serializer) + } + + pub fn deserialize<'de, D>(deserializer: D) -> Result, D::Error> + where + D: Deserializer<'de>, + { + let values = Vec::::deserialize(deserializer)?; + values + .into_iter() + .map(super::decimal_biguint::parse_repr::) + .collect() + } +} diff --git a/src/models/misc/feasible_register_assignment.rs b/src/models/misc/feasible_register_assignment.rs index 105ca697..d6657840 100644 --- a/src/models/misc/feasible_register_assignment.rs +++ b/src/models/misc/feasible_register_assignment.rs @@ -7,7 +7,7 @@ use crate::registry::{FieldInfo, ProblemSchemaEntry}; use crate::traits::Problem; -use serde::{Deserialize, Serialize}; +use serde::{Deserialize, Deserializer, Serialize}; inventory::submit! { ProblemSchemaEntry { @@ -57,7 +57,7 @@ inventory::submit! { /// let solution = solver.find_witness(&problem); /// assert!(solution.is_some()); /// ``` -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize)] pub struct FeasibleRegisterAssignment { /// Number of vertices. num_vertices: usize, @@ -67,6 +67,38 @@ pub struct FeasibleRegisterAssignment { num_registers: usize, /// Register assignment f(v) for each vertex. assignment: Vec, + /// Precomputed: dependencies[v] = vertices that v depends on. + #[serde(skip)] + dependencies: Vec>, + /// Precomputed: dependents[u] = vertices that depend on u. + #[serde(skip)] + dependents: Vec>, +} + +#[derive(Deserialize)] +struct FeasibleRegisterAssignmentData { + num_vertices: usize, + arcs: Vec<(usize, usize)>, + num_registers: usize, + assignment: Vec, +} + +impl<'de> Deserialize<'de> for FeasibleRegisterAssignment { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let data = FeasibleRegisterAssignmentData::deserialize(deserializer)?; + let (dependencies, dependents) = Self::build_adjacency(data.num_vertices, &data.arcs); + Ok(Self { + num_vertices: data.num_vertices, + arcs: data.arcs, + num_registers: data.num_registers, + assignment: data.assignment, + dependencies, + dependents, + }) + } } impl FeasibleRegisterAssignment { @@ -115,14 +147,31 @@ impl FeasibleRegisterAssignment { num_registers ); } + let (dependencies, dependents) = Self::build_adjacency(num_vertices, &arcs); Self { num_vertices, arcs, num_registers, assignment, + dependencies, + dependents, } } + /// Build dependency and dependent adjacency lists from arcs. + fn build_adjacency( + num_vertices: usize, + arcs: &[(usize, usize)], + ) -> (Vec>, Vec>) { + let mut dependencies = vec![vec![]; num_vertices]; + let mut dependents = vec![vec![]; num_vertices]; + for &(v, u) in arcs { + dependencies[v].push(u); + dependents[u].push(v); + } + (dependencies, dependents) + } + /// Get the number of vertices. pub fn num_vertices(&self) -> usize { self.num_vertices @@ -172,14 +221,6 @@ impl FeasibleRegisterAssignment { order[position] = vertex; } - // Build dependency info - let mut dependencies: Vec> = vec![vec![]; n]; - let mut dependents: Vec> = vec![vec![]; n]; - for &(v, u) in &self.arcs { - dependencies[v].push(u); - dependents[u].push(v); - } - // Check topological ordering and register conflicts let mut computed = vec![false; n]; @@ -187,7 +228,7 @@ impl FeasibleRegisterAssignment { let vertex = order[step]; // Check dependencies: all dependencies must have been computed - for &dep in &dependencies[vertex] { + for &dep in &self.dependencies[vertex] { if !computed[dep] { return false; } @@ -195,15 +236,12 @@ impl FeasibleRegisterAssignment { // Check register conflict: the register assigned to this vertex // must not be currently occupied by a live value. - // A previously computed vertex w is "live" if: - // - assignment[w] == assignment[vertex] (same register) - // - w has at least one dependent (other than vertex) that hasn't - // been computed yet. The current vertex is consuming w's value - // at this step, so we exclude it from the liveness check. let reg = self.assignment[vertex]; for &w in &order[..step] { if self.assignment[w] == reg { - let still_live = dependents[w].iter().any(|&d| d != vertex && !computed[d]); + let still_live = self.dependents[w] + .iter() + .any(|&d| d != vertex && !computed[d]); if still_live { return false; } diff --git a/src/models/misc/mod.rs b/src/models/misc/mod.rs index 4146b1a6..54591f19 100644 --- a/src/models/misc/mod.rs +++ b/src/models/misc/mod.rs @@ -54,6 +54,7 @@ pub(crate) mod additional_key; mod betweenness; +pub(crate) mod biguint_serde; mod cyclic_ordering; /// Decode a Lehmer code into a permutation of `0..n`. diff --git a/src/models/misc/non_liveness_free_petri_net.rs b/src/models/misc/non_liveness_free_petri_net.rs index 5171cd77..322a2e72 100644 --- a/src/models/misc/non_liveness_free_petri_net.rs +++ b/src/models/misc/non_liveness_free_petri_net.rs @@ -49,6 +49,9 @@ pub struct NonLivenessFreePetriNet { place_to_transition: Vec<(usize, usize)>, transition_to_place: Vec<(usize, usize)>, initial_marking: Vec, + /// Precomputed globally dead transitions (not serialized). + #[serde(skip)] + globally_dead: Vec, } impl NonLivenessFreePetriNet { @@ -149,13 +152,16 @@ impl NonLivenessFreePetriNet { &transition_to_place, &initial_marking, )?; - Ok(Self { + let mut net = Self { num_places, num_transitions, place_to_transition, transition_to_place, initial_marking, - }) + globally_dead: Vec::new(), + }; + net.globally_dead = net.compute_globally_dead_transitions(); + Ok(net) } /// Create a new `NonLivenessFreePetriNet` instance. @@ -220,18 +226,12 @@ impl NonLivenessFreePetriNet { fn enabled_transitions(&self, marking: &[usize]) -> Vec { let mut enabled = vec![true; self.num_transitions]; // A transition t is enabled iff every input place has at least one token. - // First, mark all transitions that have at least one input place. - let mut has_input = vec![false; self.num_transitions]; + // Transitions with no input places remain enabled (source transitions). for &(p, t) in &self.place_to_transition { - has_input[t] = true; if marking[p] == 0 { enabled[t] = false; } } - // Transitions with no input places are always enabled (source transitions). - // They remain true in the enabled vector. - // But we need to handle the case where has_input is false: leave enabled as true. - let _ = has_input; // used implicitly above enabled } @@ -401,12 +401,10 @@ impl Problem for NonLivenessFreePetriNet { return Or(false); } - let globally_dead = self.compute_globally_dead_transitions(); - // Config selects transitions claimed to be dead. // Return true iff at least one selected transition is indeed globally dead. for (t, &selected) in config.iter().enumerate() { - if selected == 1 && globally_dead[t] { + if selected == 1 && self.globally_dead[t] { return Or(true); } } diff --git a/src/models/misc/subset_product.rs b/src/models/misc/subset_product.rs index 5d558f5c..b82136ed 100644 --- a/src/models/misc/subset_product.rs +++ b/src/models/misc/subset_product.rs @@ -51,9 +51,9 @@ inventory::submit! { /// ``` #[derive(Debug, Clone, Serialize, Deserialize)] pub struct SubsetProduct { - #[serde(with = "decimal_biguint_vec")] + #[serde(with = "super::biguint_serde::decimal_biguint_vec")] sizes: Vec, - #[serde(with = "decimal_biguint")] + #[serde(with = "super::biguint_serde::decimal_biguint")] target: BigUint, } @@ -129,6 +129,9 @@ impl Problem for SubsetProduct { for (i, &x) in config.iter().enumerate() { if x == 1 { product *= &self.sizes[i]; + if product > self.target { + return crate::types::Or(false); + } } } product == self.target @@ -140,68 +143,6 @@ crate::declare_variants! { default SubsetProduct => "2^num_elements", } -mod decimal_biguint { - use super::BigUint; - use serde::de::Error; - use serde::{Deserialize, Deserializer, Serializer}; - - #[derive(Deserialize)] - #[serde(untagged)] - pub(super) enum Repr { - String(String), - U64(u64), - I64(i64), - } - - pub(super) fn parse_repr(value: Repr) -> Result { - match value { - Repr::String(s) => BigUint::parse_bytes(s.as_bytes(), 10) - .ok_or_else(|| E::custom(format!("invalid decimal integer: {s}"))), - Repr::U64(n) => Ok(BigUint::from(n)), - Repr::I64(n) if n >= 0 => Ok(BigUint::from(n as u64)), - Repr::I64(n) => Err(E::custom(format!("expected nonnegative integer, got {n}"))), - } - } - - pub fn serialize(value: &BigUint, serializer: S) -> Result - where - S: Serializer, - { - serializer.serialize_str(&value.to_str_radix(10)) - } - - pub fn deserialize<'de, D>(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - parse_repr(Repr::deserialize(deserializer)?) - } -} - -mod decimal_biguint_vec { - use super::BigUint; - use serde::{Deserialize, Deserializer, Serialize, Serializer}; - - pub fn serialize(values: &[BigUint], serializer: S) -> Result - where - S: Serializer, - { - let strings: Vec = values.iter().map(ToString::to_string).collect(); - strings.serialize(serializer) - } - - pub fn deserialize<'de, D>(deserializer: D) -> Result, D::Error> - where - D: Deserializer<'de>, - { - let values = Vec::::deserialize(deserializer)?; - values - .into_iter() - .map(super::decimal_biguint::parse_repr::) - .collect() - } -} - #[cfg(feature = "example-db")] pub(crate) fn canonical_model_example_specs() -> Vec { // 6 elements [2,3,5,7,6,10], target 210 → select {2,3,5,7} diff --git a/src/models/misc/subset_sum.rs b/src/models/misc/subset_sum.rs index 873233e9..d0346613 100644 --- a/src/models/misc/subset_sum.rs +++ b/src/models/misc/subset_sum.rs @@ -51,9 +51,9 @@ inventory::submit! { /// ``` #[derive(Debug, Clone, Serialize, Deserialize)] pub struct SubsetSum { - #[serde(with = "decimal_biguint_vec")] + #[serde(with = "super::biguint_serde::decimal_biguint_vec")] sizes: Vec, - #[serde(with = "decimal_biguint")] + #[serde(with = "super::biguint_serde::decimal_biguint")] target: BigUint, } @@ -141,68 +141,6 @@ crate::declare_variants! { default SubsetSum => "2^(num_elements / 2)", } -mod decimal_biguint { - use super::BigUint; - use serde::de::Error; - use serde::{Deserialize, Deserializer, Serializer}; - - #[derive(Deserialize)] - #[serde(untagged)] - pub(super) enum Repr { - String(String), - U64(u64), - I64(i64), - } - - pub(super) fn parse_repr(value: Repr) -> Result { - match value { - Repr::String(s) => BigUint::parse_bytes(s.as_bytes(), 10) - .ok_or_else(|| E::custom(format!("invalid decimal integer: {s}"))), - Repr::U64(n) => Ok(BigUint::from(n)), - Repr::I64(n) if n >= 0 => Ok(BigUint::from(n as u64)), - Repr::I64(n) => Err(E::custom(format!("expected nonnegative integer, got {n}"))), - } - } - - pub fn serialize(value: &BigUint, serializer: S) -> Result - where - S: Serializer, - { - serializer.serialize_str(&value.to_str_radix(10)) - } - - pub fn deserialize<'de, D>(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - parse_repr(Repr::deserialize(deserializer)?) - } -} - -mod decimal_biguint_vec { - use super::BigUint; - use serde::{Deserialize, Deserializer, Serialize, Serializer}; - - pub fn serialize(values: &[BigUint], serializer: S) -> Result - where - S: Serializer, - { - let strings: Vec = values.iter().map(ToString::to_string).collect(); - strings.serialize(serializer) - } - - pub fn deserialize<'de, D>(deserializer: D) -> Result, D::Error> - where - D: Deserializer<'de>, - { - let values = Vec::::deserialize(deserializer)?; - values - .into_iter() - .map(super::decimal_biguint::parse_repr::) - .collect() - } -} - #[cfg(feature = "example-db")] pub(crate) fn canonical_model_example_specs() -> Vec { // 6 elements [3,7,1,8,2,4], target 11 → select {3,8} From c1e4e897f353f281559d02edb5470a9a4c8895d6 Mon Sep 17 00:00:00 2001 From: Xiwei Pan Date: Tue, 31 Mar 2026 14:42:58 +0800 Subject: [PATCH 21/21] fix: address agentic review findings for PR #960 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Must-fix: - S1: Add 4 missing re-exports in models/mod.rs and lib.rs prelude (QuadraticCongruences, OpenShopScheduling, PreemptiveScheduling, SequencingWithDeadlinesAndSetUpTimes) - S2: Add OpenShopScheduling CLI create handler - S3: Fix DirectedHamiltonianPath→ILP overhead (2n→3n constraints) - S4/Q1: Fix PreemptiveScheduling→ILP overhead (num_tasks→d_max) - S5: Fix SequencingWithDeadlinesAndSetUpTimes→ILP overhead (3*(n-1) → n^2*(n-1) switch detection constraints) Should-fix: - S7: Fix MinimumInternalMacroDataCompression→ILP overhead (2n+1 → n+1 constraints) - Q2: Fix setup_times field description ("away from" → "to") - Q3: Extract duplicated decode_permutation to shared misc/mod.rs Low: - A1: Remove unused EquilibriumPoint import in create.rs - A2: Fix redundant closure in create.rs Co-Authored-By: Claude Opus 4.6 (1M context) --- problemreductions-cli/src/commands/create.rs | 68 +++++++++++++++---- src/lib.rs | 14 ++-- src/models/misc/mod.rs | 18 +++++ ...equencing_to_minimize_tardy_task_weight.rs | 20 +----- ...uencing_with_deadlines_and_set_up_times.rs | 22 +----- src/models/mod.rs | 17 ++--- src/rules/directedhamiltonianpath_ilp.rs | 2 +- ...minimuminternalmacrodatacompression_ilp.rs | 2 +- src/rules/preemptivescheduling_ilp.rs | 4 +- ...equencingwithdeadlinesandsetuptimes_ilp.rs | 2 +- 10 files changed, 97 insertions(+), 72 deletions(-) diff --git a/problemreductions-cli/src/commands/create.rs b/problemreductions-cli/src/commands/create.rs index cd3eaca4..822479f1 100644 --- a/problemreductions-cli/src/commands/create.rs +++ b/problemreductions-cli/src/commands/create.rs @@ -9,10 +9,9 @@ use anyhow::{bail, Context, Result}; use problemreductions::export::{ModelExample, ProblemRef, ProblemSide, RuleExample}; use problemreductions::models::algebraic::{ AlgebraicEquationsOverGF2, ClosestVectorProblem, ConsecutiveBlockMinimization, - ConsecutiveOnesMatrixAugmentation, ConsecutiveOnesSubmatrix, EquilibriumPoint, - FeasibleBasisExtension, MinimumMatrixDomination, MinimumWeightSolutionToLinearEquations, - QuadraticCongruences, QuadraticDiophantineEquations, SimultaneousIncongruences, - SparseMatrixCompression, BMF, + ConsecutiveOnesMatrixAugmentation, ConsecutiveOnesSubmatrix, FeasibleBasisExtension, + MinimumMatrixDomination, MinimumWeightSolutionToLinearEquations, QuadraticCongruences, + QuadraticDiophantineEquations, SimultaneousIncongruences, SparseMatrixCompression, BMF, }; use problemreductions::models::formula::Quantifier; use problemreductions::models::graph::{ @@ -31,15 +30,15 @@ use problemreductions::models::misc::{ IntegerExpressionMembership, JobShopScheduling, KnownValue, KthLargestMTuple, LongestCommonSubsequence, MinimumExternalMacroDataCompression, MinimumInternalMacroDataCompression, MinimumTardinessSequencing, MultiprocessorScheduling, - NonLivenessFreePetriNet, Numerical3DimensionalMatching, PaintShop, PartiallyOrderedKnapsack, - PreemptiveScheduling, ProductionPlanning, QueryArg, RectilinearPictureCompression, - RegisterSufficiency, ResourceConstrainedScheduling, SchedulingToMinimizeWeightedCompletionTime, - SchedulingWithIndividualDeadlines, SequencingToMinimizeMaximumCumulativeCost, - SequencingToMinimizeTardyTaskWeight, SequencingToMinimizeWeightedCompletionTime, - SequencingToMinimizeWeightedTardiness, SequencingWithDeadlinesAndSetUpTimes, - SequencingWithReleaseTimesAndDeadlines, SequencingWithinIntervals, ShortestCommonSupersequence, - StringToStringCorrection, SubsetProduct, SubsetSum, SumOfSquaresPartition, ThreePartition, - TimetableDesign, + NonLivenessFreePetriNet, Numerical3DimensionalMatching, OpenShopScheduling, PaintShop, + PartiallyOrderedKnapsack, PreemptiveScheduling, ProductionPlanning, QueryArg, + RectilinearPictureCompression, RegisterSufficiency, ResourceConstrainedScheduling, + SchedulingToMinimizeWeightedCompletionTime, SchedulingWithIndividualDeadlines, + SequencingToMinimizeMaximumCumulativeCost, SequencingToMinimizeTardyTaskWeight, + SequencingToMinimizeWeightedCompletionTime, SequencingToMinimizeWeightedTardiness, + SequencingWithDeadlinesAndSetUpTimes, SequencingWithReleaseTimesAndDeadlines, + SequencingWithinIntervals, ShortestCommonSupersequence, StringToStringCorrection, + SubsetProduct, SubsetSum, SumOfSquaresPartition, ThreePartition, TimetableDesign, }; use problemreductions::models::BiconnectivityAugmentation; use problemreductions::prelude::*; @@ -893,6 +892,7 @@ fn help_flag_name(canonical: &str, field_name: &str) -> String { ("BoundedComponentSpanningForest", "max_weight") => return "bound".to_string(), ("FlowShopScheduling", "num_processors") | ("JobShopScheduling", "num_processors") + | ("OpenShopScheduling", "num_machines") | ("SchedulingWithIndividualDeadlines", "num_processors") => { return "num-processors/--m".to_string(); } @@ -2970,7 +2970,7 @@ pub fn create(args: &CreateArgs, out: &OutputConfig) -> Result<()> { })?; let sets: Vec> = sets_str .split(';') - .map(|group| util::parse_comma_list(group)) + .map(util::parse_comma_list) .collect::>()?; ( ser(KthLargestMTuple::try_new(sets, k_val as u64, bound) @@ -4829,6 +4829,46 @@ pub fn create(args: &CreateArgs, out: &OutputConfig) -> Result<()> { ) } + // OpenShopScheduling + "OpenShopScheduling" => { + let task_str = args.task_lengths.as_deref().ok_or_else(|| { + anyhow::anyhow!( + "OpenShopScheduling requires --task-lengths and --num-processors\n\n\ + Usage: pred create OpenShopScheduling --task-lengths \"1,2;2,1\" --num-processors 2" + ) + })?; + let task_lengths: Vec> = task_str + .split(';') + .map(|row| util::parse_comma_list(row.trim())) + .collect::>>()?; + let num_machines = resolve_processor_count_flags( + "OpenShopScheduling", + "Usage: pred create OpenShopScheduling --task-lengths \"1,2;2,1\" --num-processors 2", + args.num_processors, + args.m, + )? + .or_else(|| task_lengths.first().map(Vec::len)) + .ok_or_else(|| { + anyhow::anyhow!( + "Cannot infer num_processors from empty task list; use --num-processors" + ) + })?; + for (j, row) in task_lengths.iter().enumerate() { + if row.len() != num_machines { + bail!( + "task_lengths row {} has {} entries, expected {} (num_machines)", + j, + row.len(), + num_machines + ); + } + } + ( + ser(OpenShopScheduling::new(num_machines, task_lengths))?, + resolved_variant.clone(), + ) + } + // StaffScheduling "StaffScheduling" => { let usage = "Usage: pred create StaffScheduling --schedules \"1,1,1,1,1,0,0;0,1,1,1,1,1,0;0,0,1,1,1,1,1;1,0,0,1,1,1,1;1,1,0,0,1,1,1\" --requirements 2,2,2,3,3,2,1 --num-workers 4 --k 5"; diff --git a/src/lib.rs b/src/lib.rs index 42ec62d0..e30da8f0 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -43,7 +43,8 @@ pub mod variant; pub mod prelude { // Problem types pub use crate::models::algebraic::{ - ConsecutiveOnesMatrixAugmentation, QuadraticAssignment, SparseMatrixCompression, BMF, QUBO, + ConsecutiveOnesMatrixAugmentation, QuadraticAssignment, QuadraticCongruences, + SparseMatrixCompression, BMF, QUBO, }; pub use crate::models::formula::{ CNFClause, CircuitSAT, KSatisfiability, NAESatisfiability, NonTautology, @@ -75,11 +76,12 @@ pub mod prelude { ConjunctiveBooleanQuery, ConjunctiveQueryFoldability, ConsistencyOfDatabaseFrequencyTables, CosineProductIntegration, EnsembleComputation, ExpectedRetrievalCost, Factoring, FlowShopScheduling, GroupingBySwapping, JobShopScheduling, Knapsack, - LongestCommonSubsequence, MinimumTardinessSequencing, MultiprocessorScheduling, PaintShop, - Partition, ProductionPlanning, QueryArg, RectilinearPictureCompression, - ResourceConstrainedScheduling, SchedulingWithIndividualDeadlines, - SequencingToMinimizeMaximumCumulativeCost, SequencingToMinimizeWeightedCompletionTime, - SequencingToMinimizeWeightedTardiness, SequencingWithReleaseTimesAndDeadlines, + LongestCommonSubsequence, MinimumTardinessSequencing, MultiprocessorScheduling, + OpenShopScheduling, PaintShop, Partition, PreemptiveScheduling, ProductionPlanning, + QueryArg, RectilinearPictureCompression, ResourceConstrainedScheduling, + SchedulingWithIndividualDeadlines, SequencingToMinimizeMaximumCumulativeCost, + SequencingToMinimizeWeightedCompletionTime, SequencingToMinimizeWeightedTardiness, + SequencingWithDeadlinesAndSetUpTimes, SequencingWithReleaseTimesAndDeadlines, SequencingWithinIntervals, ShortestCommonSupersequence, StackerCrane, StaffScheduling, StringToStringCorrection, SubsetSum, SumOfSquaresPartition, Term, ThreePartition, TimetableDesign, diff --git a/src/models/misc/mod.rs b/src/models/misc/mod.rs index 54591f19..0ebc9365 100644 --- a/src/models/misc/mod.rs +++ b/src/models/misc/mod.rs @@ -77,6 +77,24 @@ pub(crate) fn decode_lehmer(config: &[usize], n: usize) -> Option> { Some(schedule) } +/// Decode a direct permutation configuration. +/// +/// Returns `Some(schedule)` if `config` is a valid permutation of `0..n`, +/// or `None` otherwise. +pub(crate) fn decode_permutation(config: &[usize], n: usize) -> Option> { + if config.len() != n { + return None; + } + let mut seen = vec![false; n]; + for &task in config { + if task >= n || seen[task] { + return None; + } + seen[task] = true; + } + Some(config.to_vec()) +} + /// Return the Lehmer-code dimension vector `[n, n-1, ..., 1]`. pub(crate) fn lehmer_dims(n: usize) -> Vec { (0..n).rev().map(|i| i + 1).collect() diff --git a/src/models/misc/sequencing_to_minimize_tardy_task_weight.rs b/src/models/misc/sequencing_to_minimize_tardy_task_weight.rs index c91d3339..379dac87 100644 --- a/src/models/misc/sequencing_to_minimize_tardy_task_weight.rs +++ b/src/models/misc/sequencing_to_minimize_tardy_task_weight.rs @@ -103,24 +103,6 @@ impl SequencingToMinimizeTardyTaskWeight { &self.deadlines } - /// Decode a direct permutation configuration. - /// - /// Returns the schedule as `Some(Vec)` if the config is a valid - /// permutation of `0..n`, or `None` otherwise. - fn decode_permutation(config: &[usize], n: usize) -> Option> { - if config.len() != n { - return None; - } - let mut seen = vec![false; n]; - for &task in config { - if task >= n || seen[task] { - return None; - } - seen[task] = true; - } - Some(config.to_vec()) - } - fn tardy_task_weight(&self, schedule: &[usize]) -> Min { let mut elapsed: u64 = 0; let mut total: u64 = 0; @@ -176,7 +158,7 @@ impl Problem for SequencingToMinimizeTardyTaskWeight { fn evaluate(&self, config: &[usize]) -> Min { let n = self.num_tasks(); - let Some(schedule) = Self::decode_permutation(config, n) else { + let Some(schedule) = super::decode_permutation(config, n) else { return Min(None); }; self.tardy_task_weight(&schedule) diff --git a/src/models/misc/sequencing_with_deadlines_and_set_up_times.rs b/src/models/misc/sequencing_with_deadlines_and_set_up_times.rs index d222997c..3b14e6bc 100644 --- a/src/models/misc/sequencing_with_deadlines_and_set_up_times.rs +++ b/src/models/misc/sequencing_with_deadlines_and_set_up_times.rs @@ -22,7 +22,7 @@ inventory::submit! { FieldInfo { name: "lengths", type_name: "Vec", description: "Processing time for each task" }, FieldInfo { name: "deadlines", type_name: "Vec", description: "Deadline d(t) for each task" }, FieldInfo { name: "compilers", type_name: "Vec", description: "Compiler index k(t) for each task" }, - FieldInfo { name: "setup_times", type_name: "Vec", description: "Setup time s(c) charged when switching away from compiler c" }, + FieldInfo { name: "setup_times", type_name: "Vec", description: "Setup time s(c) charged when switching to compiler c" }, ], } } @@ -135,24 +135,6 @@ impl SequencingWithDeadlinesAndSetUpTimes { &self.setup_times } - /// Decode a direct permutation configuration. - /// - /// Returns `Some(schedule)` if the config is a valid permutation of `0..n`, - /// or `None` otherwise. - fn decode_permutation(config: &[usize], n: usize) -> Option> { - if config.len() != n { - return None; - } - let mut seen = vec![false; n]; - for &task in config { - if task >= n || seen[task] { - return None; - } - seen[task] = true; - } - Some(config.to_vec()) - } - /// Check whether a schedule meets all deadlines. /// /// Returns `true` iff every task in the schedule completes by its deadline. @@ -224,7 +206,7 @@ impl Problem for SequencingWithDeadlinesAndSetUpTimes { fn evaluate(&self, config: &[usize]) -> Or { let n = self.num_tasks(); - let Some(schedule) = Self::decode_permutation(config, n) else { + let Some(schedule) = super::decode_permutation(config, n) else { return Or(false); }; Or(self.all_deadlines_met(&schedule)) diff --git a/src/models/mod.rs b/src/models/mod.rs index 7c1cadf5..e920df18 100644 --- a/src/models/mod.rs +++ b/src/models/mod.rs @@ -13,8 +13,8 @@ pub use algebraic::{ AlgebraicEquationsOverGF2, ClosestVectorProblem, ConsecutiveBlockMinimization, ConsecutiveOnesMatrixAugmentation, ConsecutiveOnesSubmatrix, EquilibriumPoint, FeasibleBasisExtension, MinimumMatrixDomination, MinimumWeightSolutionToLinearEquations, - QuadraticAssignment, QuadraticDiophantineEquations, SimultaneousIncongruences, - SparseMatrixCompression, BMF, ILP, QUBO, + QuadraticAssignment, QuadraticCongruences, QuadraticDiophantineEquations, + SimultaneousIncongruences, SparseMatrixCompression, BMF, ILP, QUBO, }; pub use formula::{ CNFClause, CircuitSAT, KSatisfiability, NAESatisfiability, NonTautology, @@ -51,12 +51,13 @@ pub use misc::{ GroupingBySwapping, IntExpr, IntegerExpressionMembership, JobShopScheduling, Knapsack, KthLargestMTuple, LongestCommonSubsequence, MinimumExternalMacroDataCompression, MinimumInternalMacroDataCompression, MinimumTardinessSequencing, MultiprocessorScheduling, - NonLivenessFreePetriNet, Numerical3DimensionalMatching, PaintShop, Partition, - PrecedenceConstrainedScheduling, ProductionPlanning, QueryArg, RectilinearPictureCompression, - RegisterSufficiency, ResourceConstrainedScheduling, SchedulingToMinimizeWeightedCompletionTime, - SchedulingWithIndividualDeadlines, SequencingToMinimizeMaximumCumulativeCost, - SequencingToMinimizeTardyTaskWeight, SequencingToMinimizeWeightedCompletionTime, - SequencingToMinimizeWeightedTardiness, SequencingWithReleaseTimesAndDeadlines, + NonLivenessFreePetriNet, Numerical3DimensionalMatching, OpenShopScheduling, PaintShop, + Partition, PrecedenceConstrainedScheduling, PreemptiveScheduling, ProductionPlanning, QueryArg, + RectilinearPictureCompression, RegisterSufficiency, ResourceConstrainedScheduling, + SchedulingToMinimizeWeightedCompletionTime, SchedulingWithIndividualDeadlines, + SequencingToMinimizeMaximumCumulativeCost, SequencingToMinimizeTardyTaskWeight, + SequencingToMinimizeWeightedCompletionTime, SequencingToMinimizeWeightedTardiness, + SequencingWithDeadlinesAndSetUpTimes, SequencingWithReleaseTimesAndDeadlines, SequencingWithinIntervals, ShortestCommonSupersequence, StackerCrane, StaffScheduling, StringToStringCorrection, SubsetProduct, SubsetSum, SumOfSquaresPartition, Term, ThreePartition, TimetableDesign, diff --git a/src/rules/directedhamiltonianpath_ilp.rs b/src/rules/directedhamiltonianpath_ilp.rs index 33a7040c..f9cb0152 100644 --- a/src/rules/directedhamiltonianpath_ilp.rs +++ b/src/rules/directedhamiltonianpath_ilp.rs @@ -43,7 +43,7 @@ impl ReductionResult for ReductionDirectedHamiltonianPathToILP { #[reduction( overhead = { num_vars = "num_vertices^2", - num_constraints = "2 * num_vertices + (num_vertices - 1) * (num_vertices^2 - num_arcs)", + num_constraints = "3 * num_vertices + (num_vertices - 1) * (num_vertices^2 - num_arcs)", } )] impl ReduceTo> for DirectedHamiltonianPath { diff --git a/src/rules/minimuminternalmacrodatacompression_ilp.rs b/src/rules/minimuminternalmacrodatacompression_ilp.rs index 287863b4..fe442a3e 100644 --- a/src/rules/minimuminternalmacrodatacompression_ilp.rs +++ b/src/rules/minimuminternalmacrodatacompression_ilp.rs @@ -155,7 +155,7 @@ impl ReductionResult for ReductionIMDCToILP { #[reduction( overhead = { num_vars = "string_len + string_len ^ 3", - num_constraints = "string_len + 1 + string_len", + num_constraints = "string_len + 1", } )] impl ReduceTo> for MinimumInternalMacroDataCompression { diff --git a/src/rules/preemptivescheduling_ilp.rs b/src/rules/preemptivescheduling_ilp.rs index 28107c4c..3c068ec7 100644 --- a/src/rules/preemptivescheduling_ilp.rs +++ b/src/rules/preemptivescheduling_ilp.rs @@ -59,8 +59,8 @@ impl ReductionResult for ReductionPSToILP { #[reduction( overhead = { - num_vars = "num_tasks * num_tasks + 1", - num_constraints = "num_tasks + num_tasks * num_tasks + num_precedences * num_tasks + num_tasks * num_tasks", + num_vars = "num_tasks * d_max + 1", + num_constraints = "num_tasks + d_max + num_precedences * d_max + 2 * num_tasks * d_max", } )] impl ReduceTo> for PreemptiveScheduling { diff --git a/src/rules/sequencingwithdeadlinesandsetuptimes_ilp.rs b/src/rules/sequencingwithdeadlinesandsetuptimes_ilp.rs index 1868c29e..711a5ea9 100644 --- a/src/rules/sequencingwithdeadlinesandsetuptimes_ilp.rs +++ b/src/rules/sequencingwithdeadlinesandsetuptimes_ilp.rs @@ -45,7 +45,7 @@ impl ReductionResult for ReductionSWDSTToILP { #[reduction(overhead = { num_vars = "num_tasks * num_tasks + (num_tasks - 1) + num_tasks * (num_tasks - 1)", - num_constraints = "2 * num_tasks + 3 * (num_tasks - 1) + 3 * num_tasks * (num_tasks - 1) + num_tasks * num_tasks", + num_constraints = "2 * num_tasks + num_tasks^2 * (num_tasks - 1) + 3 * num_tasks * (num_tasks - 1) + num_tasks * num_tasks", })] impl ReduceTo> for SequencingWithDeadlinesAndSetUpTimes { type Result = ReductionSWDSTToILP;