From b7286490f788e12c83c61efb6ef99aa235aa829e Mon Sep 17 00:00:00 2001 From: zazabap Date: Tue, 31 Mar 2026 08:08:44 +0000 Subject: [PATCH 1/3] feat: add 10 reduction rules from #770 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add 10 new reduction rules connecting existing problem models: Tier 1a (simple): - Partition → BinPacking (#396): capacity=S/2, identity extraction - ExactCoverBy3Sets → MaximumSetPacking (#823): identity transformation - NAESatisfiability → MaxCut (#821): literal-pair edges + variable edges ThreePartition scheduling (5 rules): - ThreePartition → ResourceConstrainedScheduling (#477): 3 processors, resource=size - ThreePartition → SequencingWithReleaseTimesAndDeadlines (#469): filler-task slots - ThreePartition → SequencingToMinimizeWeightedTardiness (#473): filler-task slots - ThreePartition → FlowShopScheduling (#482): 3-machine separators - ThreePartition → JobShopScheduling (#485): 2-processor separators ILP/graph: - ILP/i32 → ILP/bool (#769): FBBT + truncated binary encoding - MaxCut → MinimumCutIntoBoundedSets (#849): complement graph bisection Also adds num_literal_pairs() getter to NAESatisfiability model and updates dominated-rules allow-list and path parity tests. 
Co-Authored-By: Claude Opus 4.6 (1M context) --- src/models/formula/nae_satisfiability.rs | 10 + .../exactcoverby3sets_maximumsetpacking.rs | 82 ++++ src/rules/ilp_i32_ilp_bool.rs | 356 ++++++++++++++++++ src/rules/maxcut_minimumcutintoboundedsets.rs | 137 +++++++ src/rules/mod.rs | 2 + src/rules/naesatisfiability_maxcut.rs | 143 +++++++ src/rules/partition_binpacking.rs | 91 +++++ .../threepartition_flowshopscheduling.rs | 163 ++++++++ src/rules/threepartition_jobshopscheduling.rs | 215 +++++++++++ ...partition_resourceconstrainedscheduling.rs | 93 +++++ ...n_sequencingtominimizeweightedtardiness.rs | 138 +++++++ ..._sequencingwithreleasetimesanddeadlines.rs | 164 ++++++++ src/unit_tests/rules/analysis.rs | 4 +- .../exactcoverby3sets_maximumsetpacking.rs | 83 ++++ src/unit_tests/rules/ilp_i32_ilp_bool.rs | 224 +++++++++++ .../rules/maxcut_minimumcutintoboundedsets.rs | 146 +++++++ .../rules/naesatisfiability_maxcut.rs | 164 ++++++++ src/unit_tests/rules/partition_binpacking.rs | 59 +++ .../threepartition_flowshopscheduling.rs | 185 +++++++++ .../rules/threepartition_jobshopscheduling.rs | 218 +++++++++++ ...partition_resourceconstrainedscheduling.rs | 126 +++++++ ...n_sequencingtominimizeweightedtardiness.rs | 176 +++++++++ ..._sequencingwithreleasetimesanddeadlines.rs | 94 +++++ 23 files changed, 3072 insertions(+), 1 deletion(-) create mode 100644 src/rules/exactcoverby3sets_maximumsetpacking.rs create mode 100644 src/rules/ilp_i32_ilp_bool.rs create mode 100644 src/rules/maxcut_minimumcutintoboundedsets.rs create mode 100644 src/rules/naesatisfiability_maxcut.rs create mode 100644 src/rules/partition_binpacking.rs create mode 100644 src/rules/threepartition_flowshopscheduling.rs create mode 100644 src/rules/threepartition_jobshopscheduling.rs create mode 100644 src/rules/threepartition_resourceconstrainedscheduling.rs create mode 100644 src/rules/threepartition_sequencingtominimizeweightedtardiness.rs create mode 100644 
src/rules/threepartition_sequencingwithreleasetimesanddeadlines.rs create mode 100644 src/unit_tests/rules/exactcoverby3sets_maximumsetpacking.rs create mode 100644 src/unit_tests/rules/ilp_i32_ilp_bool.rs create mode 100644 src/unit_tests/rules/maxcut_minimumcutintoboundedsets.rs create mode 100644 src/unit_tests/rules/naesatisfiability_maxcut.rs create mode 100644 src/unit_tests/rules/partition_binpacking.rs create mode 100644 src/unit_tests/rules/threepartition_flowshopscheduling.rs create mode 100644 src/unit_tests/rules/threepartition_jobshopscheduling.rs create mode 100644 src/unit_tests/rules/threepartition_resourceconstrainedscheduling.rs create mode 100644 src/unit_tests/rules/threepartition_sequencingtominimizeweightedtardiness.rs create mode 100644 src/unit_tests/rules/threepartition_sequencingwithreleasetimesanddeadlines.rs diff --git a/src/models/formula/nae_satisfiability.rs b/src/models/formula/nae_satisfiability.rs index c65a6b31..5e79f2de 100644 --- a/src/models/formula/nae_satisfiability.rs +++ b/src/models/formula/nae_satisfiability.rs @@ -69,6 +69,16 @@ impl NAESatisfiability { self.clauses.iter().map(|c| c.len()).sum() } + /// Get the total number of literal pairs across all clauses. + /// + /// For each clause with k literals, this contributes C(k,2) = k*(k-1)/2 pairs. + pub fn num_literal_pairs(&self) -> usize { + self.clauses + .iter() + .map(|c| c.len() * (c.len() - 1) / 2) + .sum() + } + /// Get the clauses. pub fn clauses(&self) -> &[CNFClause] { &self.clauses diff --git a/src/rules/exactcoverby3sets_maximumsetpacking.rs b/src/rules/exactcoverby3sets_maximumsetpacking.rs new file mode 100644 index 00000000..8718485c --- /dev/null +++ b/src/rules/exactcoverby3sets_maximumsetpacking.rs @@ -0,0 +1,82 @@ +//! Reduction from ExactCoverBy3Sets to MaximumSetPacking. +//! +//! Given an X3C instance with universe X (|X| = 3q) and collection C of +//! 3-element subsets, construct a MaximumSetPacking instance where each +//! 
triple becomes a variable-length set with unit weight. An exact cover +//! of q disjoint triples corresponds to a maximum packing of value q. + +use crate::models::set::{ExactCoverBy3Sets, MaximumSetPacking}; +use crate::reduction; +use crate::rules::traits::{ReduceTo, ReductionResult}; +use crate::types::One; + +/// Result of reducing ExactCoverBy3Sets to MaximumSetPacking. +#[derive(Debug, Clone)] +pub struct ReductionXC3SToMaximumSetPacking { + target: MaximumSetPacking, +} + +impl ReductionResult for ReductionXC3SToMaximumSetPacking { + type Source = ExactCoverBy3Sets; + type Target = MaximumSetPacking; + + fn target_problem(&self) -> &MaximumSetPacking { + &self.target + } + + /// Extract X3C solution from MaximumSetPacking solution. + /// + /// The configuration is identity (same binary selection vector). + /// A packing of q disjoint 3-sets over a 3q-element universe is necessarily + /// an exact cover, so no additional checking is needed. + fn extract_solution(&self, target_solution: &[usize]) -> Vec { + target_solution.to_vec() + } +} + +#[reduction(overhead = { + num_sets = "num_subsets", +})] +impl ReduceTo> for ExactCoverBy3Sets { + type Result = ReductionXC3SToMaximumSetPacking; + + fn reduce_to(&self) -> Self::Result { + let sets: Vec> = self + .subsets() + .iter() + .map(|triple| triple.to_vec()) + .collect(); + + ReductionXC3SToMaximumSetPacking { + target: MaximumSetPacking::::new(sets), + } + } +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_rule_example_specs() -> Vec { + use crate::export::SolutionPair; + + vec![crate::example_db::specs::RuleExampleSpec { + id: "exactcoverby3sets_to_maximumsetpacking", + build: || { + // Universe {0,1,2,3,4,5}, subsets [{0,1,2}, {0,1,3}, {3,4,5}, {2,4,5}, {1,3,5}] + // Exact cover: S0={0,1,2} + S2={3,4,5} + let source = ExactCoverBy3Sets::new( + 6, + vec![[0, 1, 2], [0, 1, 3], [3, 4, 5], [2, 4, 5], [1, 3, 5]], + ); + crate::example_db::specs::rule_example_with_witness::<_, MaximumSetPacking>( + 
source, + SolutionPair { + source_config: vec![1, 0, 1, 0, 0], + target_config: vec![1, 0, 1, 0, 0], + }, + ) + }, + }] +} + +#[cfg(test)] +#[path = "../unit_tests/rules/exactcoverby3sets_maximumsetpacking.rs"] +mod tests; diff --git a/src/rules/ilp_i32_ilp_bool.rs b/src/rules/ilp_i32_ilp_bool.rs new file mode 100644 index 00000000..6577cb5e --- /dev/null +++ b/src/rules/ilp_i32_ilp_bool.rs @@ -0,0 +1,356 @@ +//! Reduction from ILP to ILP via truncated binary encoding with FBBT. +//! +//! Uses Feasibility-Based Bound Tightening (Savelsbergh 1994, Achterberg et al. 2020) +//! to infer per-variable upper bounds, then encodes each integer variable into +//! ceil(log2(U+1)) binary variables using truncated binary encoding (Karimi & Rosenberg 2017). + +use crate::models::algebraic::{Comparison, LinearConstraint, ILP}; +use crate::reduction; +use crate::rules::traits::{ReduceTo, ReductionResult}; + +/// Error type for FBBT failures. +#[derive(Debug, Clone, PartialEq)] +pub enum FbbtError { + /// At least one variable has an unbounded upper bound after FBBT. + Unbounded, + /// The constraint system is provably infeasible. + Infeasible, +} + +/// Per-variable encoding info: start index in binary variables, weights. +#[derive(Debug, Clone)] +struct VarEncoding { + /// Index of the first binary variable for this integer variable. + start: usize, + /// Weights for each binary variable: [1, 2, 4, ..., remainder]. + weights: Vec, +} + +/// Infer upper bounds for non-negative integer variables via FBBT. +/// +/// Returns `Ok(bounds)` with finite upper bounds, or an error if the system +/// is infeasible or unbounded. 
+fn fbbt(num_vars: usize, constraints: &[LinearConstraint]) -> Result, FbbtError> { + const INF: i64 = i64::MAX / 2; // sentinel for +infinity (safe for addition) + + let mut lower = vec![0i64; num_vars]; + let mut upper = vec![INF; num_vars]; + + let max_iters = num_vars + 1; + + for _ in 0..max_iters { + let mut changed = false; + + for c in constraints { + // Compute activity bounds: act_min = sum of min contributions, act_max = sum of max contributions + let mut act_min: i64 = 0; + let mut act_max: i64 = 0; + let mut act_min_finite = true; + let mut act_max_finite = true; + + for &(var, coef) in &c.terms { + let coef_i = coef as i64; // coefficients are integer-valued in practice + if coef_i > 0 { + act_min = act_min.saturating_add(coef_i.saturating_mul(lower[var])); + if upper[var] >= INF { + act_max_finite = false; + } else { + act_max = act_max.saturating_add(coef_i.saturating_mul(upper[var])); + } + } else if coef_i < 0 { + if upper[var] >= INF { + act_min_finite = false; + } else { + act_min = act_min.saturating_add(coef_i.saturating_mul(upper[var])); + } + act_max = act_max.saturating_add(coef_i.saturating_mul(lower[var])); + } + } + + let rhs = c.rhs as i64; + + // Infeasibility checks + if matches!(c.cmp, Comparison::Le | Comparison::Eq) && act_min_finite && act_min > rhs { + return Err(FbbtError::Infeasible); + } + if matches!(c.cmp, Comparison::Ge | Comparison::Eq) && act_max_finite && act_max < rhs { + return Err(FbbtError::Infeasible); + } + + // Tighten each variable + for &(var, coef) in &c.terms { + let coef_i = coef as i64; + if coef_i == 0 { + continue; + } + + // From Le or Eq: upper bound tightening for positive coef, lower bound for negative + if matches!(c.cmp, Comparison::Le | Comparison::Eq) { + // Compute residual min = act_min - this variable's min contribution + let my_min = if coef_i > 0 { + coef_i.saturating_mul(lower[var]) + } else { + if upper[var] >= INF { + continue; // can't compute residual + } + 
coef_i.saturating_mul(upper[var]) + }; + if !(act_min_finite || coef_i < 0 && upper[var] >= INF) { + // act_min is -inf, residual is -inf, no useful bound + continue; + } + let res_min = if act_min_finite { + act_min - my_min + } else { + // act_min was -inf because of this var's contribution + // but my_min was the infinite part, so residual is finite + // This case shouldn't produce useful bounds + continue; + }; + + if coef_i > 0 { + // a_i * x_i <= rhs - res_min => x_i <= floor((rhs - res_min) / a_i) + let new_u = floor_div(rhs - res_min, coef_i); + if new_u < upper[var] { + upper[var] = new_u; + changed = true; + } + } else { + // a_i * x_i <= rhs - res_min, a_i < 0 => x_i >= ceil((rhs - res_min) / a_i) + let new_l = ceil_div(rhs - res_min, coef_i); + if new_l > lower[var] { + lower[var] = new_l; + changed = true; + } + } + } + + // From Ge or Eq: lower bound tightening for positive coef, upper for negative + if matches!(c.cmp, Comparison::Ge | Comparison::Eq) { + let my_max = if coef_i > 0 { + if upper[var] >= INF { + continue; + } + coef_i.saturating_mul(upper[var]) + } else { + coef_i.saturating_mul(lower[var]) + }; + if !(act_max_finite || coef_i > 0 && upper[var] >= INF) { + continue; + } + let res_max = if act_max_finite { + act_max - my_max + } else { + continue; + }; + + if coef_i > 0 { + // a_i * x_i >= rhs - res_max => x_i >= ceil((rhs - res_max) / a_i) + let new_l = ceil_div(rhs - res_max, coef_i); + if new_l > lower[var] { + lower[var] = new_l; + changed = true; + } + } else { + // a_i * x_i >= rhs - res_max, a_i < 0 => x_i <= floor((rhs - res_max) / a_i) + let new_u = floor_div(rhs - res_max, coef_i); + if new_u < upper[var] { + upper[var] = new_u; + changed = true; + } + } + } + + if lower[var] > upper[var] { + return Err(FbbtError::Infeasible); + } + } + } + + if !changed { + break; + } + } + + // Check for unbounded variables + for &u in &upper { + if u >= INF { + return Err(FbbtError::Unbounded); + } + } + + Ok(upper) +} + +/// Floor division 
that rounds toward negative infinity. +fn floor_div(a: i64, b: i64) -> i64 { + let d = a / b; + let r = a % b; + if (r != 0) && ((r ^ b) < 0) { + d - 1 + } else { + d + } +} + +/// Ceiling division that rounds toward positive infinity. +fn ceil_div(a: i64, b: i64) -> i64 { + let d = a / b; + let r = a % b; + if (r != 0) && ((r ^ b) >= 0) { + d + 1 + } else { + d + } +} + +/// Compute the truncated binary encoding weights for a variable with upper bound U. +/// +/// Returns weights [1, 2, 4, ..., remainder] such that sum of weights = U. +fn binary_weights(upper_bound: i64) -> Vec { + if upper_bound == 0 { + return vec![]; // fixed at 0, no binary variables needed + } + let k = num_bits(upper_bound); + let mut weights = Vec::with_capacity(k); + for j in 0..(k - 1) { + weights.push(1i64 << j); + } + // Last weight: U - (2^{K-1} - 1) + let last = upper_bound - ((1i64 << (k - 1)) - 1); + weights.push(last); + weights +} + +/// Number of binary variables needed: ceil(log2(U + 1)). +fn num_bits(upper_bound: i64) -> usize { + if upper_bound <= 0 { + return 0; + } + // ceil(log2(U + 1)) = floor(log2(U)) + 1 = 64 - leading_zeros(U) + 64 - (upper_bound as u64).leading_zeros() as usize +} + +/// Reduction result for ILP -> ILP. +#[derive(Debug, Clone)] +pub struct ReductionIntILPToBinaryILP { + target: ILP, + /// Per-source-variable encoding info. 
+ encodings: Vec, +} + +impl ReductionResult for ReductionIntILPToBinaryILP { + type Source = ILP; + type Target = ILP; + + fn target_problem(&self) -> &ILP { + &self.target + } + + fn extract_solution(&self, target_solution: &[usize]) -> Vec { + self.encodings + .iter() + .map(|enc| { + let val: i64 = enc + .weights + .iter() + .enumerate() + .map(|(j, &w)| w * target_solution[enc.start + j] as i64) + .sum(); + val as usize + }) + .collect() + } +} + +#[reduction(overhead = { + num_vars = "31 * num_variables", + num_constraints = "num_constraints", +})] +impl ReduceTo> for ILP { + type Result = ReductionIntILPToBinaryILP; + + fn reduce_to(&self) -> Self::Result { + if self.num_vars == 0 { + return ReductionIntILPToBinaryILP { + target: ILP::::new(0, vec![], vec![], self.sense), + encodings: vec![], + }; + } + + // Step 1: FBBT to infer upper bounds + let upper_bounds = match fbbt(self.num_vars, &self.constraints) { + Ok(bounds) => bounds, + Err(FbbtError::Infeasible) => { + // Return an infeasible ILP: 1 variable, constraint y0 >= 1 AND y0 <= 0 + return ReductionIntILPToBinaryILP { + target: ILP::::new( + 1, + vec![ + LinearConstraint::ge(vec![(0, 1.0)], 1.0), + LinearConstraint::le(vec![(0, 1.0)], 0.0), + ], + vec![], + self.sense, + ), + encodings: (0..self.num_vars) + .map(|_| VarEncoding { + start: 0, + weights: vec![], + }) + .collect(), + }; + } + Err(FbbtError::Unbounded) => { + // Fallback: use 31 bits per variable (full i32 range) + vec![(1i64 << 31) - 1; self.num_vars] + } + }; + + // Step 2: Build encodings + let mut encodings = Vec::with_capacity(self.num_vars); + let mut total_bool_vars = 0; + for &u in &upper_bounds { + let weights = binary_weights(u); + encodings.push(VarEncoding { + start: total_bool_vars, + weights: weights.clone(), + }); + total_bool_vars += weights.len(); + } + + // Step 3: Transform constraints + let constraints = self + .constraints + .iter() + .map(|c| { + let mut new_terms = Vec::new(); + for &(var, coef) in &c.terms { + let 
enc = &encodings[var]; + for (j, &w) in enc.weights.iter().enumerate() { + new_terms.push((enc.start + j, coef * w as f64)); + } + } + LinearConstraint::new(new_terms, c.cmp, c.rhs) + }) + .collect(); + + // Step 4: Transform objective + let mut new_objective = Vec::new(); + for &(var, coef) in &self.objective { + let enc = &encodings[var]; + for (j, &w) in enc.weights.iter().enumerate() { + new_objective.push((enc.start + j, coef * w as f64)); + } + } + + ReductionIntILPToBinaryILP { + target: ILP::::new(total_bool_vars, constraints, new_objective, self.sense), + encodings, + } + } +} + +#[cfg(test)] +#[path = "../unit_tests/rules/ilp_i32_ilp_bool.rs"] +mod tests; diff --git a/src/rules/maxcut_minimumcutintoboundedsets.rs b/src/rules/maxcut_minimumcutintoboundedsets.rs new file mode 100644 index 00000000..e3289b66 --- /dev/null +++ b/src/rules/maxcut_minimumcutintoboundedsets.rs @@ -0,0 +1,137 @@ +//! Reduction from MaxCut to MinimumCutIntoBoundedSets. +//! +//! Transforms a maximum cut problem into a minimum cut into bounded sets problem +//! by padding to even vertex count, building a complete graph with inverted weights, +//! and enforcing balanced bisection via size bounds. +//! +//! Reference: Garey, Johnson, and Stockmeyer (1976), "Some simplified NP-complete +//! graph problems". Garey & Johnson, *Computers and Intractability*, ND17. + +use crate::models::graph::{MaxCut, MinimumCutIntoBoundedSets}; +use crate::reduction; +use crate::rules::traits::{ReduceTo, ReductionResult}; +use crate::topology::{Graph, SimpleGraph}; + +/// Result of reducing MaxCut to MinimumCutIntoBoundedSets. +#[derive(Debug, Clone)] +pub struct ReductionMaxCutToMinCutBounded { + target: MinimumCutIntoBoundedSets, + /// Number of original vertices in the source problem. 
+ original_n: usize, +} + +impl ReductionResult for ReductionMaxCutToMinCutBounded { + type Source = MaxCut; + type Target = MinimumCutIntoBoundedSets; + + fn target_problem(&self) -> &Self::Target { + &self.target + } + + /// Extract the source solution from the target balanced bisection. + /// Take only the first `original_n` vertex assignments. + fn extract_solution(&self, target_solution: &[usize]) -> Vec { + target_solution[..self.original_n].to_vec() + } +} + +#[reduction( + overhead = { + num_vertices = "2 * num_vertices + 2", + num_edges = "(num_vertices + 1) * (2 * num_vertices + 1)", + } +)] +impl ReduceTo> for MaxCut { + type Result = ReductionMaxCutToMinCutBounded; + + fn reduce_to(&self) -> Self::Result { + let n = self.graph().num_vertices(); + + // Step 1: Pad to even vertex count. + // n' = n if n is even, n+1 if n is odd. N = 2*n'. + let n_prime = n + (n % 2); // round up to even + let big_n = 2 * n_prime; + + // Step 2: Compute W_max + let w_max = self.edge_weights().iter().copied().max().unwrap_or(0) + 1; + + // Build an adjacency lookup for the original graph + let orig_edges = self.graph().edges(); + let mut edge_weight_map: std::collections::HashMap<(usize, usize), i32> = + std::collections::HashMap::new(); + for (idx, &(u, v)) in orig_edges.iter().enumerate() { + let w = *self.edge_weight_by_index(idx).unwrap(); + let (a, b) = if u < v { (u, v) } else { (v, u) }; + edge_weight_map.insert((a, b), w); + } + + // Step 3: Build complete graph K_N with inverted weights + let mut edges = Vec::new(); + let mut weights = Vec::new(); + for i in 0..big_n { + for j in (i + 1)..big_n { + edges.push((i, j)); + if let Some(&w) = edge_weight_map.get(&(i, j)) { + weights.push(w_max - w); + } else { + weights.push(w_max); + } + } + } + + // Step 4: Set source, sink, size_bound + let source_vertex = n_prime; + let sink_vertex = n_prime + 1; + let size_bound = n_prime; + + let target = MinimumCutIntoBoundedSets::new( + SimpleGraph::new(big_n, edges), + weights, 
+ source_vertex, + sink_vertex, + size_bound, + ); + + ReductionMaxCutToMinCutBounded { + target, + original_n: n, + } + } +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_rule_example_specs() -> Vec { + use crate::export::SolutionPair; + use crate::solvers::BruteForce; + + vec![crate::example_db::specs::RuleExampleSpec { + id: "maxcut_to_minimumcutintoboundedsets", + build: || { + // Triangle with unit weights: max cut = 2 + let source = MaxCut::new( + SimpleGraph::new(3, vec![(0, 1), (1, 2), (0, 2)]), + vec![1i32, 1, 1], + ); + let reduction = + ReduceTo::>::reduce_to(&source); + + // Find optimal source and target solutions + let solver = BruteForce::new(); + let source_witness = solver.find_witness(&source).unwrap(); + let target_witness = solver.find_witness(reduction.target_problem()).unwrap(); + + crate::example_db::specs::assemble_rule_example( + &source, + reduction.target_problem(), + vec![SolutionPair { + source_config: source_witness, + target_config: target_witness, + }], + ) + }, + }] +} + +#[cfg(test)] +#[path = "../unit_tests/rules/maxcut_minimumcutintoboundedsets.rs"] +mod tests; diff --git a/src/rules/mod.rs b/src/rules/mod.rs index fc59bbf3..f33644ba 100644 --- a/src/rules/mod.rs +++ b/src/rules/mod.rs @@ -55,6 +55,7 @@ pub(crate) mod minimumvertexcover_minimumfeedbackarcset; pub(crate) mod minimumvertexcover_minimumfeedbackvertexset; pub(crate) mod minimumvertexcover_minimumhittingset; pub(crate) mod minimumvertexcover_minimumsetcovering; +pub(crate) mod naesatisfiability_maxcut; pub(crate) mod paintshop_qubo; pub(crate) mod partition_cosineproductintegration; pub(crate) mod partition_knapsack; @@ -330,6 +331,7 @@ pub(crate) fn canonical_rule_example_specs() -> Vec, + source_num_vars: usize, +} + +impl ReductionResult for ReductionNAESATToMaxCut { + type Source = NAESatisfiability; + type Target = MaxCut; + + fn target_problem(&self) -> &Self::Target { + &self.target + } + + /// Extract a NAE-SAT assignment from a MaxCut partition. 
+ /// + /// Variable x_i is assigned based on vertex 2*i: if it is in set 0 + /// (config[2*i] == 0), set x_i = false (config value 0); if in set 1, + /// set x_i = true (config value 1). + fn extract_solution(&self, target_solution: &[usize]) -> Vec { + (0..self.source_num_vars) + .map(|i| target_solution[2 * i]) + .collect() + } +} + +/// Map a literal to its vertex index. +/// +/// Positive literal l (l > 0): vertex 2*(l-1) +/// Negative literal l (l < 0): vertex 2*((-l)-1) + 1 +fn literal_vertex(lit: i32) -> usize { + let var_idx = lit.unsigned_abs() as usize - 1; + if lit > 0 { + 2 * var_idx + } else { + 2 * var_idx + 1 + } +} + +#[reduction( + overhead = { + num_vertices = "2 * num_vars", + num_edges = "num_vars + num_literal_pairs", + } +)] +impl ReduceTo> for NAESatisfiability { + type Result = ReductionNAESATToMaxCut; + + fn reduce_to(&self) -> Self::Result { + let n = self.num_vars(); + let m = self.num_clauses(); + let total_vertices = 2 * n; + let big_m = (m + 1) as i32; + + let mut edges: Vec<(usize, usize)> = Vec::new(); + let mut weights: Vec = Vec::new(); + + // Step 1: Variable edges — connect (2*i, 2*i+1) with weight M = m+1 + for i in 0..n { + edges.push((2 * i, 2 * i + 1)); + weights.push(big_m); + } + + // Step 2: Clause edges — for each clause, add weight-1 edges between + // all pairs of literal vertices + for clause in self.clauses() { + let lits = &clause.literals; + for a in 0..lits.len() { + for b in (a + 1)..lits.len() { + edges.push((literal_vertex(lits[a]), literal_vertex(lits[b]))); + weights.push(1); + } + } + } + + let graph = SimpleGraph::new(total_vertices, edges); + let target = MaxCut::new(graph, weights); + + ReductionNAESATToMaxCut { + target, + source_num_vars: n, + } + } +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_rule_example_specs() -> Vec { + use crate::export::SolutionPair; + use crate::models::formula::CNFClause; + + vec![crate::example_db::specs::RuleExampleSpec { + id: "naesatisfiability_to_maxcut", 
+ build: || { + // 3 variables, 2 clauses: + // C1 = (x1, x2, ~x3) + // C2 = (~x1, x3, x2) + // NAE-satisfying: x1=T, x2=F, x3=T + let source = NAESatisfiability::new( + 3, + vec![ + CNFClause::new(vec![1, 2, -3]), + CNFClause::new(vec![-1, 3, 2]), + ], + ); + crate::example_db::specs::rule_example_with_witness::<_, MaxCut>( + source, + SolutionPair { + // x1=T(1), x2=F(0), x3=T(1) + source_config: vec![1, 0, 1], + // Vertices: x1(0)=1, ~x1(1)=0, x2(2)=0, ~x2(3)=1, x3(4)=1, ~x3(5)=0 + // All variable edges cross (weight M=3 each) -> 3*3=9 + // C1=(x1,x2,~x3): vertices 0,2,5 -> sides {1},{0,0} -> edges (0,2) crosses, (0,5) crosses, (2,5) doesn't -> +2 + // C2=(~x1,x3,x2): vertices 1,4,2 -> sides {0},{1,0} -> edges (1,4) crosses, (1,2) doesn't, (4,2) crosses -> +2 + // Total = 9 + 2 + 2 = 13 + target_config: vec![1, 0, 0, 1, 1, 0], + }, + ) + }, + }] +} + +#[cfg(test)] +#[path = "../unit_tests/rules/naesatisfiability_maxcut.rs"] +mod tests; diff --git a/src/rules/partition_binpacking.rs b/src/rules/partition_binpacking.rs new file mode 100644 index 00000000..4314c678 --- /dev/null +++ b/src/rules/partition_binpacking.rs @@ -0,0 +1,91 @@ +//! Reduction from Partition to BinPacking. +//! +//! Given a Partition instance with sizes A = {a_1, ..., a_n} and total sum S, +//! construct a BinPacking instance with: +//! - Items: same sizes (cast from u64 to i32) +//! - Bin capacity: floor(S / 2) +//! +//! A valid partition (two subsets of equal sum) exists iff all items can be +//! packed into exactly 2 bins of capacity S/2. If S is odd, 2 bins of capacity +//! floor(S/2) cannot hold all items, so the answer is NO for both problems. +//! +//! Solution extraction is the identity: the binary subset assignment in Partition +//! directly corresponds to the bin assignment in BinPacking. + +use crate::models::misc::{BinPacking, Partition}; +use crate::reduction; +use crate::rules::traits::{ReduceTo, ReductionResult}; + +/// Result of reducing Partition to BinPacking. 
+#[derive(Debug, Clone)] +pub struct ReductionPartitionToBinPacking { + target: BinPacking, +} + +impl ReductionResult for ReductionPartitionToBinPacking { + type Source = Partition; + type Target = BinPacking; + + fn target_problem(&self) -> &Self::Target { + &self.target + } + + fn extract_solution(&self, target_solution: &[usize]) -> Vec { + // BinPacking may use any bin indices (0..n-1). Remap the two distinct + // bins used in a 2-bin packing to Partition's {0, 1} assignment. + // The first bin encountered maps to 0, the second to 1. + let first_bin = target_solution[0]; + target_solution + .iter() + .map(|&b| if b == first_bin { 0 } else { 1 }) + .collect() + } +} + +fn partition_size_to_i32(value: u64) -> i32 { + i32::try_from(value) + .expect("Partition -> BinPacking requires all sizes and total_sum / 2 to fit in i32") +} + +#[reduction(overhead = { + num_items = "num_elements", +})] +impl ReduceTo> for Partition { + type Result = ReductionPartitionToBinPacking; + + fn reduce_to(&self) -> Self::Result { + let sizes: Vec = self + .sizes() + .iter() + .copied() + .map(partition_size_to_i32) + .collect(); + let capacity = partition_size_to_i32(self.total_sum() / 2); + + ReductionPartitionToBinPacking { + target: BinPacking::new(sizes, capacity), + } + } +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_rule_example_specs() -> Vec { + use crate::export::SolutionPair; + + vec![crate::example_db::specs::RuleExampleSpec { + id: "partition_to_binpacking", + build: || { + crate::example_db::specs::rule_example_with_witness::<_, BinPacking>( + Partition::new(vec![3, 1, 1, 2, 2, 1]), + SolutionPair { + source_config: vec![0, 1, 1, 0, 1, 1], + target_config: vec![0, 1, 1, 0, 1, 1], + }, + ) + }, + }] +} + +#[cfg(test)] +#[path = "../unit_tests/rules/partition_binpacking.rs"] +mod tests; diff --git a/src/rules/threepartition_flowshopscheduling.rs b/src/rules/threepartition_flowshopscheduling.rs new file mode 100644 index 00000000..efa72dcb --- /dev/null +++ 
b/src/rules/threepartition_flowshopscheduling.rs @@ -0,0 +1,163 @@ +//! Reduction from ThreePartition to FlowShopScheduling. +//! +//! Given a 3-Partition instance with 3m elements of sizes s(a_i) and bound B, +//! construct a 3-machine flow-shop scheduling instance: +//! +//! - 3m "element jobs": job i has task_lengths = [s(a_i), s(a_i), s(a_i)] +//! - (m-1) "separator jobs": task_lengths = [0, L, 0] where L = m*B + 1 +//! - Deadline D = makespan of a canonical schedule (computed via compute_makespan) +//! +//! A valid 3-partition exists iff the flow-shop schedule meets deadline D. +//! The large separator tasks on machine 2 force exactly 3 element jobs +//! (summing to B) between consecutive separators. +//! +//! Solution extraction: decode Lehmer code to job order, count separators +//! to determine which group each element job belongs to. + +use crate::models::misc::{FlowShopScheduling, ThreePartition}; +use crate::reduction; +use crate::rules::traits::{ReduceTo, ReductionResult}; + +/// Result of reducing ThreePartition to FlowShopScheduling. +#[derive(Debug, Clone)] +pub struct ReductionThreePartitionToFSS { + target: FlowShopScheduling, + /// Number of elements (3m) in the source problem. + num_elements: usize, +} + +impl ReductionResult for ReductionThreePartitionToFSS { + type Source = ThreePartition; + type Target = FlowShopScheduling; + + fn target_problem(&self) -> &Self::Target { + &self.target + } + + /// Extract source solution from target solution. + /// + /// The target config is a Lehmer code encoding a job permutation. + /// Decode to job order, then walk through counting separators + /// to assign each element job to a group. 
+ fn extract_solution(&self, target_solution: &[usize]) -> Vec { + let n = self.target.num_jobs(); + let job_order = + crate::models::misc::decode_lehmer(target_solution, n).expect("valid Lehmer code"); + + let mut config = vec![0usize; self.num_elements]; + let mut current_group = 0; + + for &job in &job_order { + if job < self.num_elements { + // Element job: assign to current group + config[job] = current_group; + } else { + // Separator job: advance to next group + current_group += 1; + } + } + + config + } +} + +#[reduction(overhead = { + num_jobs = "num_elements + num_groups - 1", +})] +impl ReduceTo for ThreePartition { + type Result = ReductionThreePartitionToFSS; + + fn reduce_to(&self) -> Self::Result { + let num_elements = self.num_elements(); + let num_groups = self.num_groups(); + let bound = self.bound(); + + // L = m * B + 1 — large enough to force grouping + let big_l = (num_groups as u64) * bound + 1; + + // Build task_lengths: element jobs first, then separator jobs + let mut task_lengths = Vec::with_capacity(num_elements + num_groups - 1); + + // Element jobs: identical task length on all 3 machines + for &size in self.sizes() { + task_lengths.push(vec![size, size, size]); + } + + // Separator jobs: [0, L, 0] + for _ in 0..num_groups.saturating_sub(1) { + task_lengths.push(vec![0, big_l, 0]); + } + + // Compute deadline from canonical schedule. + // Canonical order: group1 elements, sep1, group2 elements, sep2, ... + // We use a valid partition ordering to compute the achievable makespan. 
+ let canonical_order: Vec = { + let mut order = Vec::with_capacity(num_elements + num_groups - 1); + for g in 0..num_groups { + // Add 3 element jobs per group (in natural order) + for i in 0..3 { + order.push(g * 3 + i); + } + // Add separator after each group except the last + if g < num_groups - 1 { + order.push(num_elements + g); + } + } + order + }; + + let target_no_deadline = FlowShopScheduling::new(3, task_lengths.clone(), u64::MAX); + let deadline = target_no_deadline.compute_makespan(&canonical_order); + + let target = FlowShopScheduling::new(3, task_lengths, deadline); + + ReductionThreePartitionToFSS { + target, + num_elements, + } + } +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_rule_example_specs() -> Vec { + use crate::export::SolutionPair; + + vec![crate::example_db::specs::RuleExampleSpec { + id: "threepartition_to_flowshopscheduling", + build: || { + // ThreePartition: sizes [4, 5, 6, 4, 6, 5], bound=15, m=2 + // Valid partition: {4,5,6} and {4,6,5} + let source = ThreePartition::new(vec![4, 5, 6, 4, 6, 5], 15); + let reduction = ReduceTo::::reduce_to(&source); + let target = reduction.target_problem(); + + // Canonical order: elements [0,1,2], separator [6], elements [3,4,5] + // Lehmer encode: job order [0,1,2,6,3,4,5] + // For Lehmer encoding of [0,1,2,6,3,4,5]: + // available=[0,1,2,3,4,5,6], pick 0 -> index 0; available=[1,2,3,4,5,6] + // available=[1,2,3,4,5,6], pick 1 -> index 0; available=[2,3,4,5,6] + // available=[2,3,4,5,6], pick 2 -> index 0; available=[3,4,5,6] + // available=[3,4,5,6], pick 6 -> index 3; available=[3,4,5] + // available=[3,4,5], pick 3 -> index 0; available=[4,5] + // available=[4,5], pick 4 -> index 0; available=[5] + // available=[5], pick 5 -> index 0; + let target_config = vec![0, 0, 0, 3, 0, 0, 0]; + + // Source config: element 0,1,2 -> group 0; element 3,4,5 -> group 1 + let source_config = vec![0, 0, 0, 1, 1, 1]; + + crate::example_db::specs::assemble_rule_example( + &source, + target, + 
vec![SolutionPair { + source_config, + target_config, + }], + ) + }, + }] +} + +#[cfg(test)] +#[path = "../unit_tests/rules/threepartition_flowshopscheduling.rs"] +mod tests; diff --git a/src/rules/threepartition_jobshopscheduling.rs b/src/rules/threepartition_jobshopscheduling.rs new file mode 100644 index 00000000..fd638c62 --- /dev/null +++ b/src/rules/threepartition_jobshopscheduling.rs @@ -0,0 +1,215 @@ +//! Reduction from ThreePartition to JobShopScheduling. +//! +//! Given a 3-Partition instance with 3m positive integers (each strictly between +//! B/4 and B/2) that must be partitioned into m triples summing to B, construct a +//! Job-Shop Scheduling instance on 2 processors: +//! +//! - **Element jobs** (3m jobs): job i has tasks [(0, s(a_i)), (1, s(a_i))]. +//! - **Separator jobs** (m-1 jobs): job k has a single task [(0, L)] where L = m*B + 1. +//! +//! The separators force m windows of size B on processor 0. A valid 3-partition +//! exists iff the optimal makespan equals the threshold D = m*B + (m-1)*L. +//! +//! Solution extraction: decode the processor-0 Lehmer code to find the task +//! ordering, locate the separator boundaries, and assign each element to the +//! group (window) it occupies. +//! +//! Reference: Garey, Johnson & Sethi (1976). "The complexity of flowshop and +//! jobshop scheduling." Mathematics of Operations Research 1, pp. 117-129. + +use crate::models::misc::{JobShopScheduling, ThreePartition}; +use crate::reduction; +use crate::rules::traits::{ReduceTo, ReductionResult}; + +/// Result of reducing ThreePartition to JobShopScheduling. +#[derive(Debug, Clone)] +pub struct ReductionThreePartitionToJSS { + target: JobShopScheduling, + /// Number of elements (3m) in the source problem. + num_elements: usize, + /// Number of groups (m) in the source problem. + num_groups: usize, + /// The makespan threshold: schedules achieving this makespan correspond + /// to valid 3-partitions. 
+ threshold: u64, +} + +impl ReductionThreePartitionToJSS { + /// The makespan threshold D: a valid 3-partition exists iff the optimal + /// makespan of the target JSS instance equals D. + pub fn threshold(&self) -> u64 { + self.threshold + } + + /// Compute the makespan threshold D = m*B + (m-1)*L where L = m*B + 1. + fn compute_threshold(num_groups: usize, bound: u64) -> u64 { + let m = num_groups as u64; + let b = bound; + let l = m * b + 1; + m * b + (m - 1) * l + } + + /// Compute the separator length L = m*B + 1. + fn separator_length(num_groups: usize, bound: u64) -> u64 { + (num_groups as u64) * bound + 1 + } +} + +impl ReductionResult for ReductionThreePartitionToJSS { + type Source = ThreePartition; + type Target = JobShopScheduling; + + fn target_problem(&self) -> &Self::Target { + &self.target + } + + fn extract_solution(&self, target_solution: &[usize]) -> Vec { + // The target config encodes Lehmer codes for each machine's tasks. + // Machine 0 has: 3m element tasks (task index 0 of each element job) + // + (m-1) separator tasks + // = 3m + (m-1) tasks total + // Machine 1 has: 3m element tasks (task index 1 of each element job) + // + // The config layout is [machine_0_lehmer..., machine_1_lehmer...]. + // machine_0_lehmer has length (3m + m - 1) = 4m - 1. + // + // We decode machine 0's ordering to find which group each element + // belongs to: elements between separators k-1 and k form group k. + + let num_elem = self.num_elements; + let m = self.num_groups; + + // Number of tasks on machine 0: element tasks + separator tasks + let machine0_len = num_elem + (m - 1); + + // Decode machine 0 Lehmer code + let machine0_lehmer = &target_solution[..machine0_len]; + let machine0_order = crate::models::misc::decode_lehmer(machine0_lehmer, machine0_len) + .expect("valid Lehmer code for machine 0"); + + // Task IDs on machine 0: + // - Element job i contributes task at flat index 2*i (first task of job i). 
+ // - Separator job k contributes task at flat index 2*num_elem + k. + // + // Build mapping: flat task ID -> element index or separator marker. + let separator_task_ids: Vec = (0..m - 1).map(|k| 2 * num_elem + k).collect(); + + // machine0_order gives the order of task indices assigned to machine 0. + // The flatten_tasks() in JobShopScheduling assigns IDs sequentially: + // job 0 tasks get ids [0, 1], job 1 tasks get [2, 3], ... + // Element job i (2 tasks): ids [2*i, 2*i+1] + // Separator job k (1 task): id [2*num_elem + k] + // + // Machine 0 tasks are: element task 2*i (for i in 0..num_elem) and + // separator task 2*num_elem+k (for k in 0..m-1). + // Machine 1 tasks are: element task 2*i+1 (for i in 0..num_elem). + // + // The machine_task_ids for machine 0 are ordered by job index (since + // flatten_tasks iterates jobs in order): [0, 2, 4, ..., 2*(num_elem-1), + // 2*num_elem, 2*num_elem+1, ...]. + // + // machine0_order[j] gives the j-th machine-local index in the Lehmer + // permutation, which maps to machine_task_ids[machine0_order[j]]. + + // Build the machine 0 task id list in the same order as flatten_tasks + let mut machine0_task_ids: Vec = Vec::with_capacity(machine0_len); + for i in 0..num_elem { + machine0_task_ids.push(2 * i); // element job i, task 0 (on machine 0) + } + for k in 0..m - 1 { + machine0_task_ids.push(2 * num_elem + k); // separator job k + } + + // The actual ordering of tasks on machine 0: + let ordered_task_ids: Vec = machine0_order + .iter() + .map(|&local_idx| machine0_task_ids[local_idx]) + .collect(); + + // Now assign groups: walk through ordered_task_ids, incrementing group + // at each separator. 
+ let mut config = vec![0usize; num_elem]; + let mut current_group = 0usize; + + for &task_id in &ordered_task_ids { + if separator_task_ids.contains(&task_id) { + current_group += 1; + } else { + // This is an element task with flat id 2*i => element i + let element_index = task_id / 2; + config[element_index] = current_group; + } + } + + config + } +} + +#[reduction(overhead = { + num_jobs = "num_elements + num_groups - 1", + num_tasks = "2 * num_elements + num_groups - 1", +})] +impl ReduceTo for ThreePartition { + type Result = ReductionThreePartitionToJSS; + + fn reduce_to(&self) -> Self::Result { + let num_elements = self.num_elements(); + let m = self.num_groups(); + let bound = self.bound(); + let l = ReductionThreePartitionToJSS::separator_length(m, bound); + let threshold = ReductionThreePartitionToJSS::compute_threshold(m, bound); + + // Build jobs + let mut jobs: Vec> = Vec::with_capacity(num_elements + m - 1); + + // Element jobs: 2 tasks each, one on each processor + for &size in self.sizes() { + jobs.push(vec![(0, size), (1, size)]); + } + + // Separator jobs: 1 task each, on processor 0 + for _ in 0..m.saturating_sub(1) { + jobs.push(vec![(0, l)]); + } + + ReductionThreePartitionToJSS { + target: JobShopScheduling::new(2, jobs), + num_elements, + num_groups: m, + threshold, + } + } +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_rule_example_specs() -> Vec { + use crate::export::SolutionPair; + + vec![crate::example_db::specs::RuleExampleSpec { + id: "threepartition_to_jobshopscheduling", + build: || { + // m=1: sizes [4, 5, 6], bound=15, one group + // 3 element jobs, 0 separators => 3 jobs, 6 tasks + // All elements go to group 0: config = [0, 0, 0] + let source = ThreePartition::new(vec![4, 5, 6], 15); + let reduction = ReduceTo::::reduce_to(&source); + + // For m=1, any ordering works. Use identity ordering on both machines. 
+ // Machine 0: 3 tasks => Lehmer [0, 0, 0] + // Machine 1: 3 tasks => Lehmer [0, 0, 0] + let target_config = vec![0, 0, 0, 0, 0, 0]; + + crate::example_db::specs::rule_example_with_witness::<_, JobShopScheduling>( + source, + SolutionPair { + source_config: vec![0, 0, 0], + target_config, + }, + ) + }, + }] +} + +#[cfg(test)] +#[path = "../unit_tests/rules/threepartition_jobshopscheduling.rs"] +mod tests; diff --git a/src/rules/threepartition_resourceconstrainedscheduling.rs b/src/rules/threepartition_resourceconstrainedscheduling.rs new file mode 100644 index 00000000..61895a45 --- /dev/null +++ b/src/rules/threepartition_resourceconstrainedscheduling.rs @@ -0,0 +1,93 @@ +//! Reduction from ThreePartition to ResourceConstrainedScheduling. +//! +//! Given a 3-Partition instance with 3m elements and target sum B (where each +//! element a_i satisfies B/4 < a_i < B/2), construct a ResourceConstrainedScheduling +//! instance with: +//! - 3m unit-length tasks (one per element) +//! - 3 processors (at most 3 tasks per time slot) +//! - 1 resource with bound B +//! - Resource requirement for task i = s(a_i) +//! - Deadline D = m (number of triples) +//! +//! A valid 3-partition exists iff the tasks can be feasibly scheduled: +//! the B/4 < a_i < B/2 constraint forces exactly 3 tasks per slot, and +//! the resource bound forces each slot's triple to sum to exactly B. +//! +//! Solution extraction is the identity: config[i] = time slot for task i +//! directly gives the group assignment for element i. +//! +//! Reference: Garey & Johnson, *Computers and Intractability*, Appendix A5.2. + +use crate::models::misc::{ResourceConstrainedScheduling, ThreePartition}; +use crate::reduction; +use crate::rules::traits::{ReduceTo, ReductionResult}; + +/// Result of reducing ThreePartition to ResourceConstrainedScheduling. 
+#[derive(Debug, Clone)] +pub struct ReductionThreePartitionToRCS { + target: ResourceConstrainedScheduling, +} + +impl ReductionResult for ReductionThreePartitionToRCS { + type Source = ThreePartition; + type Target = ResourceConstrainedScheduling; + + fn target_problem(&self) -> &Self::Target { + &self.target + } + + /// Solution extraction: identity mapping. + /// ThreePartition config (group index 0..m-1) maps directly to time slot assignment. + fn extract_solution(&self, target_solution: &[usize]) -> Vec { + target_solution.to_vec() + } +} + +#[reduction(overhead = { + num_tasks = "num_elements", +})] +impl ReduceTo for ThreePartition { + type Result = ReductionThreePartitionToRCS; + + fn reduce_to(&self) -> Self::Result { + let m = self.num_groups(); + let bound = self.bound(); + + // Each element becomes a task with resource requirement = element size + let resource_requirements: Vec> = self.sizes().iter().map(|&s| vec![s]).collect(); + + ReductionThreePartitionToRCS { + target: ResourceConstrainedScheduling::new( + 3, // 3 processors + vec![bound], // 1 resource with bound B + resource_requirements, + m as u64, // deadline = m time slots + ), + } + } +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_rule_example_specs() -> Vec { + use crate::export::SolutionPair; + + vec![crate::example_db::specs::RuleExampleSpec { + id: "threepartition_to_resourceconstrainedscheduling", + build: || { + // sizes [4, 5, 6, 4, 6, 5], B=15, m=2 + // partition: {4,5,6} and {4,6,5} — both sum to 15 + // config: elements 0,1,2 in group 0; elements 3,4,5 in group 1 + crate::example_db::specs::rule_example_with_witness::<_, ResourceConstrainedScheduling>( + ThreePartition::new(vec![4, 5, 6, 4, 6, 5], 15), + SolutionPair { + source_config: vec![0, 0, 0, 1, 1, 1], + target_config: vec![0, 0, 0, 1, 1, 1], + }, + ) + }, + }] +} + +#[cfg(test)] +#[path = "../unit_tests/rules/threepartition_resourceconstrainedscheduling.rs"] +mod tests; diff --git 
a/src/rules/threepartition_sequencingtominimizeweightedtardiness.rs b/src/rules/threepartition_sequencingtominimizeweightedtardiness.rs new file mode 100644 index 00000000..66344236 --- /dev/null +++ b/src/rules/threepartition_sequencingtominimizeweightedtardiness.rs @@ -0,0 +1,138 @@ +//! Reduction from ThreePartition to SequencingToMinimizeWeightedTardiness. +//! +//! Given a 3-PARTITION instance with 3m elements, bound B, and sizes s(a_i) +//! with B/4 < s(a_i) < B/2 and total sum = mB, construct a weighted tardiness +//! scheduling instance using the filler-task approach (Garey & Johnson, A5.1). +//! +//! - 3m element tasks: length = s(a_i), weight = 1, deadline = mB + (m-1) +//! - (m-1) filler tasks: length = 1, weight = mB + 1, deadline = (j+1)B + (j+1) +//! - Bound K = 0 +//! +//! Filler weights force zero tardiness, creating m slots of width B separated +//! by unit gaps. Exactly 3 element tasks must fill each slot. + +use crate::models::misc::{SequencingToMinimizeWeightedTardiness, ThreePartition}; +use crate::reduction; +use crate::rules::traits::{ReduceTo, ReductionResult}; + +/// Result of reducing ThreePartition to SequencingToMinimizeWeightedTardiness. +#[derive(Debug, Clone)] +pub struct ReductionThreePartitionToSMWT { + target: SequencingToMinimizeWeightedTardiness, + /// Number of element tasks (3m) — indices 0..num_elements are element tasks, + /// indices num_elements.. are filler tasks. + num_elements: usize, +} + +impl ReductionResult for ReductionThreePartitionToSMWT { + type Source = ThreePartition; + type Target = SequencingToMinimizeWeightedTardiness; + + fn target_problem(&self) -> &Self::Target { + &self.target + } + + /// Extract a ThreePartition group assignment from a target Lehmer-code solution. + /// + /// Decode the Lehmer code into a permutation, then count how many filler + /// tasks have been seen before each element task. The filler count gives + /// the group (slot) index for that element. 
+ fn extract_solution(&self, target_solution: &[usize]) -> Vec { + let n = self.target.num_tasks(); + let schedule = crate::models::misc::decode_lehmer(target_solution, n) + .expect("target solution must be a valid Lehmer code"); + + let mut assignment = vec![0usize; self.num_elements]; + let mut filler_count = 0usize; + + for &job in &schedule { + if job < self.num_elements { + // Element task — assign to current group (= number of fillers seen so far) + assignment[job] = filler_count; + } else { + // Filler task — advance to next group + filler_count += 1; + } + } + + assignment + } +} + +#[reduction(overhead = { + num_tasks = "num_elements + num_groups - 1", +})] +impl ReduceTo for ThreePartition { + type Result = ReductionThreePartitionToSMWT; + + fn reduce_to(&self) -> Self::Result { + let m = self.num_groups(); + let b = self.bound(); + let n = self.num_elements(); + let horizon = (m as u64) * b + (m as u64 - 1); + let filler_weight = (m as u64) * b + 1; + + let total_tasks = n + m.saturating_sub(1); + let mut lengths = Vec::with_capacity(total_tasks); + let mut weights = Vec::with_capacity(total_tasks); + let mut deadlines = Vec::with_capacity(total_tasks); + + // Element tasks: length = s(a_i), weight = 1, deadline = horizon + for &size in self.sizes() { + lengths.push(size); + weights.push(1); + deadlines.push(horizon); + } + + // Filler tasks: length = 1, weight = mB+1, deadline = (j+1)*B + (j+1) + for j in 0..m.saturating_sub(1) { + lengths.push(1); + weights.push(filler_weight); + let deadline = ((j + 1) as u64) * b + (j + 1) as u64; + deadlines.push(deadline); + } + + ReductionThreePartitionToSMWT { + target: SequencingToMinimizeWeightedTardiness::new(lengths, weights, deadlines, 0), + num_elements: n, + } + } +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_rule_example_specs() -> Vec { + use crate::export::SolutionPair; + + vec![crate::example_db::specs::RuleExampleSpec { + id: "threepartition_to_sequencingtominimizeweightedtardiness", + 
build: || { + // m=2, B=20, sizes=[7,7,6,7,7,6], sum=40=2*20 + // B/4=5, B/2=10 => all sizes strictly between 5 and 10 + // Partition: {7,7,6} in slot 0 and {7,7,6} in slot 1 + // Schedule: t0(7) t1(7) t2(6) f0(1) t3(7) t4(7) t5(6) + // Permutation: [0,1,2,6,3,4,5] + // Lehmer for [0,1,2,6,3,4,5]: + // pos 0: job 0 in [0,1,2,3,4,5,6] -> index 0 + // pos 1: job 1 in [1,2,3,4,5,6] -> index 0 + // pos 2: job 2 in [2,3,4,5,6] -> index 0 + // pos 3: job 6 in [3,4,5,6] -> index 3 + // pos 4: job 3 in [3,4,5] -> index 0 + // pos 5: job 4 in [4,5] -> index 0 + // pos 6: job 5 in [5] -> index 0 + crate::example_db::specs::rule_example_with_witness::< + _, + SequencingToMinimizeWeightedTardiness, + >( + ThreePartition::new(vec![7, 7, 6, 7, 7, 6], 20), + SolutionPair { + source_config: vec![0, 0, 0, 1, 1, 1], + target_config: vec![0, 0, 0, 3, 0, 0, 0], + }, + ) + }, + }] +} + +#[cfg(test)] +#[path = "../unit_tests/rules/threepartition_sequencingtominimizeweightedtardiness.rs"] +mod tests; diff --git a/src/rules/threepartition_sequencingwithreleasetimesanddeadlines.rs b/src/rules/threepartition_sequencingwithreleasetimesanddeadlines.rs new file mode 100644 index 00000000..976c6ee5 --- /dev/null +++ b/src/rules/threepartition_sequencingwithreleasetimesanddeadlines.rs @@ -0,0 +1,164 @@ +//! Reduction from ThreePartition to SequencingWithReleaseTimesAndDeadlines. +//! +//! Given a 3-Partition instance with 3m elements of sizes s(a_i) and bound B, +//! construct a single-machine scheduling instance with: +//! - 3m element tasks: length = s(a_i), release = 0, deadline = m*B + (m-1) +//! - (m-1) filler tasks: length = 1, release = (j+1)*B + j, deadline = (j+1)*B + j + 1 +//! +//! The filler tasks partition the timeline into m slots of width B each. Since +//! B/4 < s(a_i) < B/2, exactly 3 element tasks must fit in each slot, yielding +//! a valid 3-partition iff the schedule is feasible. +//! +//! Reference: Garey & Johnson, *Computers and Intractability*, Section 4.2. 
+ +use crate::models::misc::{SequencingWithReleaseTimesAndDeadlines, ThreePartition}; +use crate::reduction; +use crate::rules::traits::{ReduceTo, ReductionResult}; + +/// Number of element tasks (= source.num_elements() = 3m). +fn num_element_tasks(source: &ThreePartition) -> usize { + source.num_elements() +} + +/// Number of filler tasks (= m - 1). +fn num_filler_tasks(source: &ThreePartition) -> usize { + source.num_groups() - 1 +} + +/// Result of reducing ThreePartition to SequencingWithReleaseTimesAndDeadlines. +#[derive(Debug, Clone)] +pub struct ReductionThreePartitionToSRTD { + target: SequencingWithReleaseTimesAndDeadlines, + /// Number of element tasks (3m) — first 3m tasks in the target are element tasks. + num_element_tasks: usize, + /// The bound B from the source. + bound: u64, +} + +impl ReductionResult for ReductionThreePartitionToSRTD { + type Source = ThreePartition; + type Target = SequencingWithReleaseTimesAndDeadlines; + + fn target_problem(&self) -> &Self::Target { + &self.target + } + + /// Extract a ThreePartition config from a target schedule config. + /// + /// Decode the Lehmer code to a task permutation, simulate the schedule to + /// find each task's start time, then assign each element task to its slot + /// based on start_time / (B + 1). 
+ fn extract_solution(&self, target_solution: &[usize]) -> Vec { + let n = self.target.num_tasks(); + // Decode Lehmer code to permutation + let schedule = crate::models::misc::decode_lehmer(target_solution, n) + .expect("target_solution must be a valid Lehmer code"); + + // Simulate the schedule to find start times + let mut current_time: u64 = 0; + let mut slot_assignment = vec![0usize; self.num_element_tasks]; + let slot_width = self.bound + 1; // B + 1 (slot width including the filler gap) + + for &task in &schedule { + let start = current_time.max(self.target.release_times()[task]); + let finish = start + self.target.lengths()[task]; + current_time = finish; + + // Only element tasks (indices 0..3m) contribute to the partition + if task < self.num_element_tasks { + let slot = (start / slot_width) as usize; + slot_assignment[task] = slot; + } + } + + slot_assignment + } +} + +#[reduction(overhead = { + num_tasks = "num_elements + num_groups - 1", +})] +impl ReduceTo for ThreePartition { + type Result = ReductionThreePartitionToSRTD; + + fn reduce_to(&self) -> Self::Result { + let n_elem = num_element_tasks(self); + let n_fill = num_filler_tasks(self); + let m = self.num_groups(); + let b = self.bound(); + let total_tasks = n_elem + n_fill; + + // Time horizon: m*B + (m-1) = m*(B+1) - 1 + let horizon = (m as u64) * (b + 1) - 1; + + let mut lengths = Vec::with_capacity(total_tasks); + let mut release_times = Vec::with_capacity(total_tasks); + let mut deadlines = Vec::with_capacity(total_tasks); + + // Element tasks (indices 0..3m) + for &size in self.sizes() { + lengths.push(size); + release_times.push(0); + deadlines.push(horizon); + } + + // Filler tasks (indices 3m..4m-1) + for j in 0..n_fill { + // Filler j separates slot j from slot j+1 + // Release = (j+1)*B + j, Deadline = (j+1)*B + j + 1 + let release = ((j + 1) as u64) * b + (j as u64); + lengths.push(1); + release_times.push(release); + deadlines.push(release + 1); + } + + ReductionThreePartitionToSRTD 
{ + target: SequencingWithReleaseTimesAndDeadlines::new(lengths, release_times, deadlines), + num_element_tasks: n_elem, + bound: b, + } + } +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_rule_example_specs() -> Vec { + use crate::export::SolutionPair; + + vec![crate::example_db::specs::RuleExampleSpec { + id: "threepartition_to_sequencingwithreleasetimesanddeadlines", + build: || { + // ThreePartition: sizes=[4,5,6,4,6,5], bound=15, m=2 + // Groups: {4,5,6}=15, {4,6,5}=15 + // Source config: [0,0,0,1,1,1] (elements 0,1,2 in group 0; 3,4,5 in group 1) + // + // Target: 6 element tasks + 1 filler = 7 tasks + // Schedule for source config [0,0,0,1,1,1]: + // Slot 0 [0,15): tasks 0(len=4), 1(len=5), 2(len=6) -> times [0,4), [4,9), [9,15) + // Filler [15,16): task 6(len=1) + // Slot 1 [16,31): tasks 3(len=4), 4(len=6), 5(len=5) -> times [16,20), [20,26), [26,31) + // Permutation: [0,1,2,6,3,4,5] + // Lehmer code: [0,0,0,3,0,0,0] + // remaining=[0,1,2,3,4,5,6], pick 0 -> 0, remaining=[1,2,3,4,5,6] + // remaining=[1,2,3,4,5,6], pick 0 -> 1, remaining=[2,3,4,5,6] + // remaining=[2,3,4,5,6], pick 0 -> 2, remaining=[3,4,5,6] + // remaining=[3,4,5,6], pick 3 -> 6, remaining=[3,4,5] + // remaining=[3,4,5], pick 0 -> 3, remaining=[4,5] + // remaining=[4,5], pick 0 -> 4, remaining=[5] + // remaining=[5], pick 0 -> 5 + crate::example_db::specs::rule_example_with_witness::< + _, + SequencingWithReleaseTimesAndDeadlines, + >( + ThreePartition::new(vec![4, 5, 6, 4, 6, 5], 15), + SolutionPair { + source_config: vec![0, 0, 0, 1, 1, 1], + target_config: vec![0, 0, 0, 3, 0, 0, 0], + }, + ) + }, + }] +} + +#[cfg(test)] +#[path = "../unit_tests/rules/threepartition_sequencingwithreleasetimesanddeadlines.rs"] +mod tests; diff --git a/src/unit_tests/rules/analysis.rs b/src/unit_tests/rules/analysis.rs index cd450618..73da4b26 100644 --- a/src/unit_tests/rules/analysis.rs +++ b/src/unit_tests/rules/analysis.rs @@ -248,7 +248,9 @@ fn 
test_find_dominated_rules_returns_known_set() { "KClique {graph: \"SimpleGraph\"}", "ILP {variable: \"bool\"}", ), - // K3-SAT → QUBO via SAT → CircuitSAT → SpinGlass chain + // K2-SAT → QUBO via SAT → NAESAT → MaxCut → SpinGlass chain + ("KSatisfiability {k: \"K2\"}", "QUBO {weight: \"f64\"}"), + // K3-SAT → QUBO via MVC → MIS → MaxSetPacking chain ("KSatisfiability {k: \"K3\"}", "QUBO {weight: \"f64\"}"), // Knapsack -> ILP -> QUBO is better than the direct penalty reduction ("Knapsack", "QUBO {weight: \"f64\"}"), diff --git a/src/unit_tests/rules/exactcoverby3sets_maximumsetpacking.rs b/src/unit_tests/rules/exactcoverby3sets_maximumsetpacking.rs new file mode 100644 index 00000000..14c53bb1 --- /dev/null +++ b/src/unit_tests/rules/exactcoverby3sets_maximumsetpacking.rs @@ -0,0 +1,83 @@ +use super::*; +use crate::models::set::ExactCoverBy3Sets; +use crate::rules::test_helpers::assert_satisfaction_round_trip_from_optimization_target; +use crate::solvers::BruteForce; +use crate::traits::Problem; +use crate::types::Max; + +#[test] +fn test_exactcoverby3sets_to_maximumsetpacking_closed_loop() { + let source = ExactCoverBy3Sets::new( + 6, + vec![[0, 1, 2], [0, 1, 3], [3, 4, 5], [2, 4, 5], [1, 3, 5]], + ); + let reduction = ReduceTo::>::reduce_to(&source); + + assert_satisfaction_round_trip_from_optimization_target( + &source, + &reduction, + "ExactCoverBy3Sets -> MaximumSetPacking closed loop", + ); +} + +#[test] +fn test_exactcoverby3sets_to_maximumsetpacking_structure() { + let source = ExactCoverBy3Sets::new( + 6, + vec![[0, 1, 2], [0, 1, 3], [3, 4, 5], [2, 4, 5], [1, 3, 5]], + ); + let reduction = ReduceTo::>::reduce_to(&source); + let target = reduction.target_problem(); + + // Same number of sets as source subsets + assert_eq!(target.num_sets(), source.num_subsets()); + assert_eq!(target.num_sets(), 5); + + // Each set should have exactly 3 elements (converted from [usize; 3] to Vec) + for i in 0..target.num_sets() { + 
assert_eq!(target.get_set(i).unwrap().len(), 3); + } + + // Verify specific set contents + assert_eq!(target.sets()[0], vec![0, 1, 2]); + assert_eq!(target.sets()[1], vec![0, 1, 3]); + assert_eq!(target.sets()[2], vec![3, 4, 5]); + assert_eq!(target.sets()[3], vec![2, 4, 5]); + assert_eq!(target.sets()[4], vec![1, 3, 5]); +} + +#[test] +fn test_exactcoverby3sets_to_maximumsetpacking_unsatisfiable() { + // Universe {0,1,2,3,4,5} but subsets cannot form an exact cover: + // all subsets share element 0 + let source = ExactCoverBy3Sets::new(6, vec![[0, 1, 2], [0, 3, 4], [0, 4, 5]]); + let reduction = ReduceTo::>::reduce_to(&source); + let target = reduction.target_problem(); + + // Best packing can only select one set (since all share element 0) + let best = BruteForce::new() + .find_witness(target) + .expect("Should have an optimal solution"); + assert_eq!(target.evaluate(&best), Max(Some(1))); + + // q = 2, but packing value is 1 < 2, so no exact cover exists + let extracted = reduction.extract_solution(&best); + assert!(!source.evaluate(&extracted)); +} + +#[test] +fn test_exactcoverby3sets_to_maximumsetpacking_optimal_value() { + // Satisfiable instance: S0={0,1,2}, S1={3,4,5} form an exact cover + let source = ExactCoverBy3Sets::new(6, vec![[0, 1, 2], [3, 4, 5], [0, 3, 4]]); + let reduction = ReduceTo::>::reduce_to(&source); + let target = reduction.target_problem(); + + let best = BruteForce::new() + .find_witness(target) + .expect("Should have an optimal solution"); + // Maximum packing: S0 + S1 = 2 disjoint sets = q + assert_eq!(target.evaluate(&best), Max(Some(2))); + + let extracted = reduction.extract_solution(&best); + assert!(source.evaluate(&extracted)); +} diff --git a/src/unit_tests/rules/ilp_i32_ilp_bool.rs b/src/unit_tests/rules/ilp_i32_ilp_bool.rs new file mode 100644 index 00000000..d18367f0 --- /dev/null +++ b/src/unit_tests/rules/ilp_i32_ilp_bool.rs @@ -0,0 +1,224 @@ +use crate::models::algebraic::{LinearConstraint, ObjectiveSense, ILP}; +use 
crate::rules::traits::{ReduceTo, ReductionResult}; +use crate::solvers::BruteForce; +use crate::traits::Problem; + +/// Helper: brute-force solve a small ILP, extract solution back to ILP, +/// and return (source_config, source_obj). +fn solve_via_bool(source: &ILP) -> Option<(Vec, f64)> { + let reduction = ReduceTo::>::reduce_to(source); + let target = reduction.target_problem(); + let solver = BruteForce::new(); + let witness = solver.find_witness(target)?; + let source_config = reduction.extract_solution(&witness); + let values: Vec = source_config.iter().map(|&c| c as i64).collect(); + let obj = source.evaluate_objective(&values); + Some((source_config, obj)) +} + +#[test] +fn test_ilp_i32_to_ilp_bool_closed_loop() { + // Minimize -5x0 - 6x1, s.t. x0 + x1 <= 5, 4x0 + 7x1 <= 28 + let source = ILP::::new( + 2, + vec![ + LinearConstraint::le(vec![(0, 1.0), (1, 1.0)], 5.0), + LinearConstraint::le(vec![(0, 4.0), (1, 7.0)], 28.0), + ], + vec![(0, -5.0), (1, -6.0)], + ObjectiveSense::Minimize, + ); + + let (config, obj) = solve_via_bool(&source).expect("should find optimal"); + // Optimal: x0=3, x1=2, obj=-27 + let values: Vec = config.iter().map(|&c| c as i64).collect(); + assert!( + source.is_feasible(&values), + "extracted solution must be feasible" + ); + assert!( + (obj - (-27.0)).abs() < 1e-9, + "optimal objective should be -27, got {obj}" + ); +} + +#[test] +fn test_ilp_i32_to_ilp_bool_maximize() { + // Maximize 3x0 + 5x1, s.t. 
x0 <= 4, x1 <= 3, x0 + x1 <= 6 + let source = ILP::::new( + 2, + vec![ + LinearConstraint::le(vec![(0, 1.0)], 4.0), + LinearConstraint::le(vec![(1, 1.0)], 3.0), + LinearConstraint::le(vec![(0, 1.0), (1, 1.0)], 6.0), + ], + vec![(0, 3.0), (1, 5.0)], + ObjectiveSense::Maximize, + ); + + let (config, obj) = solve_via_bool(&source).expect("should find optimal"); + let values: Vec = config.iter().map(|&c| c as i64).collect(); + assert!(source.is_feasible(&values)); + // Optimal: x0=3, x1=3, obj=24 + assert!( + (obj - 24.0).abs() < 1e-9, + "optimal objective should be 24, got {obj}" + ); +} + +#[test] +fn test_ilp_i32_to_ilp_bool_empty() { + let source = ILP::::empty(); + let reduction = ReduceTo::>::reduce_to(&source); + let target = reduction.target_problem(); + assert_eq!(target.num_vars, 0); + assert!(target.constraints.is_empty()); + assert!(target.objective.is_empty()); +} + +#[test] +fn test_ilp_i32_to_ilp_bool_target_structure() { + // x0 + x1 <= 5, with bounds => U=[5, 5], K=[3, 3], total=6 bool vars + let source = ILP::::new( + 2, + vec![LinearConstraint::le(vec![(0, 1.0), (1, 1.0)], 5.0)], + vec![(0, 1.0)], + ObjectiveSense::Maximize, + ); + + let reduction = ReduceTo::>::reduce_to(&source); + let target = reduction.target_problem(); + + // Both variables bounded to 5: K=3 each, total 6 + assert_eq!(target.num_vars, 6); + // Same number of constraints + assert_eq!(target.constraints.len(), 1); + // All dims are 2 (binary) + assert!(target.dims().iter().all(|&d| d == 2)); +} + +#[test] +fn test_ilp_i32_to_ilp_bool_single_variable() { + // Maximize x0, s.t. x0 <= 7 + let source = ILP::::new( + 1, + vec![LinearConstraint::le(vec![(0, 1.0)], 7.0)], + vec![(0, 1.0)], + ObjectiveSense::Maximize, + ); + + let (config, obj) = solve_via_bool(&source).expect("should find optimal"); + assert_eq!(config, vec![7]); + assert!((obj - 7.0).abs() < 1e-9); +} + +#[test] +fn test_ilp_i32_to_ilp_bool_equality_constraint() { + // Minimize x0, s.t. 
x0 + x1 = 4, x0 <= 3, x1 <= 3 + let source = ILP::::new( + 2, + vec![ + LinearConstraint::eq(vec![(0, 1.0), (1, 1.0)], 4.0), + LinearConstraint::le(vec![(0, 1.0)], 3.0), + LinearConstraint::le(vec![(1, 1.0)], 3.0), + ], + vec![(0, 1.0)], + ObjectiveSense::Minimize, + ); + + let (config, obj) = solve_via_bool(&source).expect("should find optimal"); + let values: Vec = config.iter().map(|&c| c as i64).collect(); + assert!(source.is_feasible(&values)); + // x0=1, x1=3, obj=1 + assert!((obj - 1.0).abs() < 1e-9, "optimal should be 1, got {obj}"); +} + +#[test] +fn test_ilp_i32_to_ilp_bool_ge_constraint() { + // Maximize x0 + x1, s.t. x0 >= 2, x1 >= 1, x0 + x1 <= 5 + let source = ILP::::new( + 2, + vec![ + LinearConstraint::ge(vec![(0, 1.0)], 2.0), + LinearConstraint::ge(vec![(1, 1.0)], 1.0), + LinearConstraint::le(vec![(0, 1.0), (1, 1.0)], 5.0), + ], + vec![(0, 1.0), (1, 1.0)], + ObjectiveSense::Maximize, + ); + + let (config, obj) = solve_via_bool(&source).expect("should find optimal"); + let values: Vec = config.iter().map(|&c| c as i64).collect(); + assert!(source.is_feasible(&values)); + assert!((obj - 5.0).abs() < 1e-9, "optimal should be 5, got {obj}"); +} + +#[test] +fn test_ilp_i32_to_ilp_bool_infeasible() { + // x0 >= 3 AND x0 <= 1 => infeasible + let source = ILP::::new( + 1, + vec![ + LinearConstraint::ge(vec![(0, 1.0)], 3.0), + LinearConstraint::le(vec![(0, 1.0)], 1.0), + ], + vec![(0, 1.0)], + ObjectiveSense::Minimize, + ); + + let reduction = ReduceTo::>::reduce_to(&source); + let target = reduction.target_problem(); + let solver = BruteForce::new(); + // Should have no feasible solution + assert!(solver.find_witness(target).is_none()); +} + +#[test] +fn test_ilp_i32_to_ilp_bool_variable_fixed_at_zero() { + // x0 <= 0 means x0 is always 0 => 0 binary variables for x0 + // Maximize x1, s.t. 
x0 <= 0, x1 <= 3 + let source = ILP::::new( + 2, + vec![ + LinearConstraint::le(vec![(0, 1.0)], 0.0), + LinearConstraint::le(vec![(1, 1.0)], 3.0), + ], + vec![(1, 1.0)], + ObjectiveSense::Maximize, + ); + + let (config, obj) = solve_via_bool(&source).expect("should find optimal"); + assert_eq!(config[0], 0, "x0 should be fixed at 0"); + assert_eq!(config[1], 3, "x1 should be 3"); + assert!((obj - 3.0).abs() < 1e-9); +} + +#[test] +fn test_ilp_i32_to_ilp_bool_power_of_two_bound() { + // x0 <= 7 (= 2^3 - 1): standard binary, weights = [1, 2, 4] + let source = ILP::::new( + 1, + vec![LinearConstraint::le(vec![(0, 1.0)], 7.0)], + vec![(0, 1.0)], + ObjectiveSense::Maximize, + ); + + let reduction = ReduceTo::>::reduce_to(&source); + let target = reduction.target_problem(); + // 7 = 2^3 - 1, so K=3 bits + assert_eq!(target.num_vars, 3); +} + +#[test] +fn test_ilp_i32_to_ilp_bool_preserves_sense() { + for sense in [ObjectiveSense::Minimize, ObjectiveSense::Maximize] { + let source = ILP::::new( + 1, + vec![LinearConstraint::le(vec![(0, 1.0)], 3.0)], + vec![(0, 1.0)], + sense, + ); + let reduction = ReduceTo::>::reduce_to(&source); + assert_eq!(reduction.target_problem().sense, sense); + } +} diff --git a/src/unit_tests/rules/maxcut_minimumcutintoboundedsets.rs b/src/unit_tests/rules/maxcut_minimumcutintoboundedsets.rs new file mode 100644 index 00000000..0e2f3c6a --- /dev/null +++ b/src/unit_tests/rules/maxcut_minimumcutintoboundedsets.rs @@ -0,0 +1,146 @@ +use super::*; +use crate::models::graph::{MaxCut, MinimumCutIntoBoundedSets}; +use crate::rules::test_helpers::assert_optimization_round_trip_from_optimization_target; +use crate::rules::traits::ReduceTo; +use crate::topology::SimpleGraph; + +#[test] +fn test_maxcut_to_minimumcutintoboundedsets_closed_loop() { + // Triangle K_3 with unit weights: max cut = 2 + let source = MaxCut::new( + SimpleGraph::new(3, vec![(0, 1), (1, 2), (0, 2)]), + vec![1i32, 1, 1], + ); + let reduction = ReduceTo::>::reduce_to(&source); + 
assert_optimization_round_trip_from_optimization_target( + &source, + &reduction, + "MaxCut triangle -> MinCutBounded", + ); +} + +#[test] +fn test_maxcut_to_minimumcutintoboundedsets_single_edge() { + // Single edge K_2: max cut = 1 + let source = MaxCut::new(SimpleGraph::new(2, vec![(0, 1)]), vec![1i32]); + let reduction = ReduceTo::>::reduce_to(&source); + assert_optimization_round_trip_from_optimization_target( + &source, + &reduction, + "MaxCut single edge -> MinCutBounded", + ); +} + +#[test] +fn test_maxcut_to_minimumcutintoboundedsets_path_p4() { + // Path P_4: vertices 0-1-2-3, unit weights, max cut = 3 (alternate: 0,1,0,1) + let source = MaxCut::new( + SimpleGraph::new(4, vec![(0, 1), (1, 2), (2, 3)]), + vec![1i32, 1, 1], + ); + let reduction = ReduceTo::>::reduce_to(&source); + assert_optimization_round_trip_from_optimization_target( + &source, + &reduction, + "MaxCut path P4 -> MinCutBounded", + ); +} + +#[test] +fn test_maxcut_to_minimumcutintoboundedsets_weighted() { + // Triangle with weights [1, 2, 3]: max cut = 5 (cut edges with weights 2 and 3) + let source = MaxCut::new( + SimpleGraph::new(3, vec![(0, 1), (1, 2), (0, 2)]), + vec![1i32, 2, 3], + ); + let reduction = ReduceTo::>::reduce_to(&source); + assert_optimization_round_trip_from_optimization_target( + &source, + &reduction, + "MaxCut weighted triangle -> MinCutBounded", + ); +} + +#[test] +fn test_maxcut_to_minimumcutintoboundedsets_target_structure() { + // Verify the target problem structure for a 3-vertex graph + let source = MaxCut::new( + SimpleGraph::new(3, vec![(0, 1), (1, 2), (0, 2)]), + vec![1i32, 1, 1], + ); + let reduction = ReduceTo::>::reduce_to(&source); + let target = reduction.target_problem(); + + // n=3, n'=3+1=4, N=8 + assert_eq!(target.num_vertices(), 8); + // Complete graph K_8 has C(8,2) = 28 edges + assert_eq!(target.num_edges(), 28); + // source=n'=4, sink=n'+1=5 + assert_eq!(target.source(), 4); + assert_eq!(target.sink(), 5); + // size_bound = n' = 4 + 
assert_eq!(target.size_bound(), 4); +} + +#[test] +fn test_maxcut_to_minimumcutintoboundedsets_even_vertices() { + // Even number of vertices: n=4, n'=4, N=8 + let source = MaxCut::new( + SimpleGraph::new(4, vec![(0, 1), (1, 2), (2, 3), (0, 3)]), + vec![1i32, 1, 1, 1], + ); + let reduction = ReduceTo::>::reduce_to(&source); + let target = reduction.target_problem(); + + // n=4, n'=4, N=8 + assert_eq!(target.num_vertices(), 8); + assert_eq!(target.num_edges(), 28); // K_8 + assert_eq!(target.source(), 4); + assert_eq!(target.sink(), 5); + assert_eq!(target.size_bound(), 4); + + assert_optimization_round_trip_from_optimization_target( + &source, + &reduction, + "MaxCut even vertices -> MinCutBounded", + ); +} + +#[test] +fn test_maxcut_to_minimumcutintoboundedsets_extract_solution_size() { + // Verify extract_solution returns only original vertices + let source = MaxCut::new( + SimpleGraph::new(3, vec![(0, 1), (1, 2), (0, 2)]), + vec![1i32, 1, 1], + ); + let reduction = ReduceTo::>::reduce_to(&source); + + // Target has 8 vertices, extract should return 3 + let dummy_target_sol = vec![0, 1, 0, 1, 0, 1, 0, 1]; + let extracted = reduction.extract_solution(&dummy_target_sol); + assert_eq!(extracted.len(), 3); +} + +#[test] +fn test_maxcut_to_minimumcutintoboundedsets_weight_inversion() { + // Verify weight inversion: original edge gets W_max - w, non-edge gets W_max + // Use n=2 to keep the target small: n'=2, N=4, K_4 has 6 edges + let source = MaxCut::new(SimpleGraph::new(2, vec![(0, 1)]), vec![5i32]); + let reduction = ReduceTo::>::reduce_to(&source); + let target = reduction.target_problem(); + + // W_max = 5 + 1 = 6 + // n=2, n'=2, N=4, K_4 has 6 edges + assert_eq!(target.num_vertices(), 4); + assert_eq!(target.num_edges(), 6); + + // Edge (0,1) in original: weight = 6 - 5 = 1 + // All other edges: weight = 6 + let edge_weights = target.edge_weights(); + assert_eq!(edge_weights[0], 1); // (0,1): W_max - 5 = 1 + assert_eq!(edge_weights[1], 6); // (0,2): non-edge + 
assert_eq!(edge_weights[2], 6); // (0,3): non-edge + assert_eq!(edge_weights[3], 6); // (1,2): non-edge + assert_eq!(edge_weights[4], 6); // (1,3): non-edge + assert_eq!(edge_weights[5], 6); // (2,3): non-edge +} diff --git a/src/unit_tests/rules/naesatisfiability_maxcut.rs b/src/unit_tests/rules/naesatisfiability_maxcut.rs new file mode 100644 index 00000000..265df457 --- /dev/null +++ b/src/unit_tests/rules/naesatisfiability_maxcut.rs @@ -0,0 +1,164 @@ +use super::*; +use crate::models::formula::CNFClause; +use crate::models::formula::NAESatisfiability; +use crate::models::graph::MaxCut; +use crate::rules::test_helpers::assert_satisfaction_round_trip_from_optimization_target; +use crate::solvers::BruteForce; +use crate::topology::SimpleGraph; +use crate::traits::Problem; + +#[test] +fn test_naesatisfiability_to_maxcut_closed_loop() { + // 3 variables, 2 clauses: + // C1 = (x1, x2, x3) + // C2 = (~x1, ~x2, x3) + let naesat = NAESatisfiability::new( + 3, + vec![ + CNFClause::new(vec![1, 2, 3]), + CNFClause::new(vec![-1, -2, 3]), + ], + ); + let reduction = ReduceTo::>::reduce_to(&naesat); + let target = reduction.target_problem(); + + // 2*3 = 6 vertices + assert_eq!(target.num_vertices(), 6); + // 3 variable edges + 3 + 3 = 9 clause edges + assert_eq!(target.num_edges(), 9); + + assert_satisfaction_round_trip_from_optimization_target( + &naesat, + &reduction, + "NAESAT -> MaxCut closed loop", + ); +} + +#[test] +fn test_naesatisfiability_to_maxcut_single_clause() { + // Single clause: (x1, x2, x3) — NAE-satisfying iff not all same + let naesat = NAESatisfiability::new(3, vec![CNFClause::new(vec![1, 2, 3])]); + let reduction = ReduceTo::>::reduce_to(&naesat); + let target = reduction.target_problem(); + + // 6 vertices, 3 variable + 3 clause = 6 edges + assert_eq!(target.num_vertices(), 6); + assert_eq!(target.num_edges(), 6); + + assert_satisfaction_round_trip_from_optimization_target( + &naesat, + &reduction, + "NAESAT single clause -> MaxCut", + ); +} + +#[test] 
+fn test_naesatisfiability_to_maxcut_two_literal_clause() { + // Clause with 2 literals: (x1, ~x2) — always NAE-satisfying unless x1=T, x2=F or x1=F, x2=T... actually (x1, ~x2) is NAE-unsatisfied when both literals are same: x1=T,~x2=T (x2=F) or x1=F,~x2=F (x2=T). + // NAE-satisfied when x1 != ~x2, i.e., x1 == x2. + let naesat = NAESatisfiability::new(2, vec![CNFClause::new(vec![1, -2])]); + let reduction = ReduceTo::>::reduce_to(&naesat); + let target = reduction.target_problem(); + + // 4 vertices, 2 variable + 1 clause = 3 edges + assert_eq!(target.num_vertices(), 4); + assert_eq!(target.num_edges(), 3); + + assert_satisfaction_round_trip_from_optimization_target( + &naesat, + &reduction, + "NAESAT 2-literal clause -> MaxCut", + ); +} + +#[test] +fn test_naesatisfiability_to_maxcut_four_literal_clause() { + // Clause with 4 literals: (x1, x2, ~x3, x4) + let naesat = NAESatisfiability::new(4, vec![CNFClause::new(vec![1, 2, -3, 4])]); + let reduction = ReduceTo::>::reduce_to(&naesat); + let target = reduction.target_problem(); + + // 8 vertices, 4 variable + C(4,2)=6 clause = 10 edges + assert_eq!(target.num_vertices(), 8); + assert_eq!(target.num_edges(), 10); + + assert_satisfaction_round_trip_from_optimization_target( + &naesat, + &reduction, + "NAESAT 4-literal clause -> MaxCut", + ); +} + +#[test] +fn test_naesatisfiability_to_maxcut_extract_solution() { + // Verify specific extraction: x1=T, x2=F, x3=T + let naesat = NAESatisfiability::new( + 3, + vec![ + CNFClause::new(vec![1, 2, -3]), + CNFClause::new(vec![-1, 3, 2]), + ], + ); + let reduction = ReduceTo::>::reduce_to(&naesat); + + // Vertices: x1(0), ~x1(1), x2(2), ~x2(3), x3(4), ~x3(5) + // x1=T -> vertex 0 in set 1, vertex 1 in set 0 + // x2=F -> vertex 2 in set 0, vertex 3 in set 1 + // x3=T -> vertex 4 in set 1, vertex 5 in set 0 + let target_config = vec![1, 0, 0, 1, 1, 0]; + let extracted = reduction.extract_solution(&target_config); + assert_eq!(extracted, vec![1, 0, 1]); // x1=T, x2=F, x3=T + + // 
Verify this is a valid NAE-SAT solution + assert!(naesat.evaluate(&extracted).0); +} + +#[test] +fn test_naesatisfiability_to_maxcut_mixed_clause_sizes() { + // Mix of 2-literal and 3-literal clauses + let naesat = NAESatisfiability::new( + 3, + vec![ + CNFClause::new(vec![1, -2]), // 2 literals -> C(2,2)=1 pair + CNFClause::new(vec![1, 2, 3]), // 3 literals -> C(3,2)=3 pairs + CNFClause::new(vec![-1, -3]), // 2 literals -> 1 pair + ], + ); + let reduction = ReduceTo::>::reduce_to(&naesat); + let target = reduction.target_problem(); + + // 6 vertices, 3 variable + (1 + 3 + 1) = 8 edges + assert_eq!(target.num_vertices(), 6); + assert_eq!(target.num_edges(), 8); + + assert_satisfaction_round_trip_from_optimization_target( + &naesat, + &reduction, + "NAESAT mixed clause sizes -> MaxCut", + ); +} + +#[test] +fn test_naesatisfiability_to_maxcut_optimal_cut_value() { + // Verify the optimal cut value matches theoretical prediction + // n*M + sum(k_j - 1) for satisfiable instances + let naesat = NAESatisfiability::new( + 3, + vec![ + CNFClause::new(vec![1, 2, 3]), + CNFClause::new(vec![-1, -2, 3]), + ], + ); + let reduction = ReduceTo::>::reduce_to(&naesat); + let target = reduction.target_problem(); + + let solver = BruteForce::new(); + let witness = solver.find_witness(target); + assert!(witness.is_some()); + + let config = witness.unwrap(); + let cut_value = target.cut_size(&config); + // n=3, m=2, M=3, k1=3, k2=3 + // Expected: 3*3 + (3-1) + (3-1) = 9 + 2 + 2 = 13 + assert_eq!(cut_value, 13); +} diff --git a/src/unit_tests/rules/partition_binpacking.rs b/src/unit_tests/rules/partition_binpacking.rs new file mode 100644 index 00000000..c7041072 --- /dev/null +++ b/src/unit_tests/rules/partition_binpacking.rs @@ -0,0 +1,59 @@ +use super::*; +use crate::models::misc::{BinPacking, Partition}; +use crate::rules::test_helpers::assert_satisfaction_round_trip_from_optimization_target; +use crate::solvers::BruteForce; +use crate::traits::Problem; +use crate::types::Min; + 
+#[test] +fn test_partition_to_binpacking_closed_loop() { + let source = Partition::new(vec![3, 1, 1, 2, 2, 1]); + let reduction = ReduceTo::>::reduce_to(&source); + + assert_satisfaction_round_trip_from_optimization_target( + &source, + &reduction, + "Partition -> BinPacking closed loop", + ); +} + +#[test] +fn test_partition_to_binpacking_structure() { + let source = Partition::new(vec![3, 1, 1, 2, 2, 1]); + let reduction = ReduceTo::>::reduce_to(&source); + let target = reduction.target_problem(); + + assert_eq!(target.sizes(), &[3, 1, 1, 2, 2, 1]); + assert_eq!(*target.capacity(), 5); // total_sum = 10, capacity = 5 + assert_eq!(target.num_items(), source.num_elements()); +} + +#[test] +fn test_partition_to_binpacking_odd_total_is_not_satisfying() { + // Sizes [2, 4, 5], total = 11 (odd), capacity = 5 + // No balanced partition possible; BinPacking needs >= 3 bins + let source = Partition::new(vec![2, 4, 5]); + let reduction = ReduceTo::>::reduce_to(&source); + let target = reduction.target_problem(); + + let best = BruteForce::new() + .find_witness(target) + .expect("BinPacking target should always have an optimal solution"); + + // With capacity 5, items [2,4,5]: bin 0 gets [5], bin 1 gets [2,4]=6 > 5, + // so optimal needs 3 bins + let value = target.evaluate(&best); + assert_eq!(value, Min(Some(3))); + + let extracted = reduction.extract_solution(&best); + assert!(!source.evaluate(&extracted)); +} + +#[test] +#[should_panic( + expected = "Partition -> BinPacking requires all sizes and total_sum / 2 to fit in i32" +)] +fn test_partition_to_binpacking_panics_on_large_coefficients() { + let source = Partition::new(vec![(i32::MAX as u64) + 1]); + let _ = ReduceTo::>::reduce_to(&source); +} diff --git a/src/unit_tests/rules/threepartition_flowshopscheduling.rs b/src/unit_tests/rules/threepartition_flowshopscheduling.rs new file mode 100644 index 00000000..6e45ae91 --- /dev/null +++ b/src/unit_tests/rules/threepartition_flowshopscheduling.rs @@ -0,0 +1,185 @@ 
+use super::*; +use crate::models::misc::ThreePartition; +use crate::solvers::BruteForce; +use crate::traits::Problem; + +fn reduce_three_partition( + sizes: Vec, + bound: u64, +) -> (ThreePartition, ReductionThreePartitionToFSS) { + let source = ThreePartition::new(sizes, bound); + let reduction = ReduceTo::::reduce_to(&source); + (source, reduction) +} + +/// Encode a job order (permutation) as a Lehmer code. +fn encode_lehmer(job_order: &[usize]) -> Vec { + let n = job_order.len(); + let mut available: Vec = (0..n).collect(); + let mut lehmer = Vec::with_capacity(n); + for &job in job_order { + let pos = available.iter().position(|&x| x == job).unwrap(); + lehmer.push(pos); + available.remove(pos); + } + lehmer +} + +#[test] +fn test_threepartition_to_flowshopscheduling_closed_loop() { + // ThreePartition: sizes [4, 5, 6, 4, 6, 5], bound=15, m=2 + // Valid partition: group 0 = {4,5,6} (indices 0,1,2), group 1 = {4,6,5} (indices 3,4,5) + let (source, reduction) = reduce_three_partition(vec![4, 5, 6, 4, 6, 5], 15); + let target = reduction.target_problem(); + + // Verify source is satisfiable + let solver = BruteForce::new(); + assert!( + solver.find_witness(&source).is_some(), + "Source 3-Partition should be satisfiable" + ); + + // Verify target is satisfiable + assert!( + solver.find_witness(target).is_some(), + "Target FlowShopScheduling should be satisfiable" + ); + + // Canonical ordering: [0,1,2, sep(6), 3,4,5] -- group 1 elements, separator, group 2 elements + let canonical_order = vec![0, 1, 2, 6, 3, 4, 5]; + let canonical_lehmer = encode_lehmer(&canonical_order); + assert_eq!(canonical_lehmer, vec![0, 0, 0, 3, 0, 0, 0]); + + // Verify canonical ordering satisfies the target + let target_value = target.evaluate(&canonical_lehmer); + assert!(target_value.0, "Canonical ordering should meet deadline"); + + // Extract and verify: elements before separator -> group 0, after -> group 1 + let extracted = reduction.extract_solution(&canonical_lehmer); + 
assert_eq!(extracted, vec![0, 0, 0, 1, 1, 1]); + assert!( + source.evaluate(&extracted).0, + "Extracted solution should be a valid 3-partition" + ); + + // Test another valid ordering: group 2 first, then group 1 + let alt_order = vec![3, 4, 5, 6, 0, 1, 2]; + let alt_lehmer = encode_lehmer(&alt_order); + let alt_value = target.evaluate(&alt_lehmer); + assert!( + alt_value.0, + "Alternative valid ordering should meet deadline" + ); + let alt_extracted = reduction.extract_solution(&alt_lehmer); + assert_eq!(alt_extracted, vec![1, 1, 1, 0, 0, 0]); + assert!( + source.evaluate(&alt_extracted).0, + "Alternative extraction should be a valid 3-partition" + ); + + // Verify all valid-partition orderings extract correctly + // A valid partition groups elements into triples summing to B=15. + // For this instance: one triple from each of {4,5,6} values. + // Elements by value: val 4 at {0,3}, val 5 at {1,5}, val 6 at {2,4} + let target_witnesses = solver.find_all_witnesses(target); + let mut valid_extraction_count = 0; + for w in &target_witnesses { + let extracted = reduction.extract_solution(w); + if source.evaluate(&extracted).0 { + valid_extraction_count += 1; + } + } + assert!( + valid_extraction_count > 0, + "At least some target witnesses should extract to valid source solutions" + ); +} + +#[test] +fn test_threepartition_to_flowshopscheduling_structure() { + let (source, reduction) = reduce_three_partition(vec![4, 5, 6, 4, 6, 5], 15); + let target = reduction.target_problem(); + + // 3 machines + assert_eq!(target.num_processors(), 3); + // 6 element jobs + 1 separator = 7 total jobs + assert_eq!(target.num_jobs(), 7); + assert_eq!( + target.num_jobs(), + source.num_elements() + source.num_groups() - 1 + ); + + // Check element job task lengths + let task_lengths = target.task_lengths(); + for (i, tasks) in task_lengths.iter().enumerate().take(6) { + let size = source.sizes()[i]; + assert_eq!(*tasks, vec![size, size, size]); + } + + // Check separator job task 
lengths: [0, L, 0] where L = m*B+1 = 2*15+1 = 31 + let big_l = 2 * 15 + 1; + assert_eq!(task_lengths[6], vec![0, big_l, 0]); + + // Deadline should be positive + assert!(target.deadline() > 0); +} + +#[test] +fn test_threepartition_to_flowshopscheduling_solution_extraction() { + let (source, reduction) = reduce_three_partition(vec![4, 5, 6, 4, 6, 5], 15); + + // Test extraction for canonical orderings where elements are properly grouped + // Ordering: indices 0,1,2 (group 0), separator 6, indices 3,4,5 (group 1) + let lehmer = encode_lehmer(&[0, 1, 2, 6, 3, 4, 5]); + let extracted = reduction.extract_solution(&lehmer); + assert_eq!(extracted.len(), source.num_elements()); + assert_eq!(extracted, vec![0, 0, 0, 1, 1, 1]); + assert!(source.evaluate(&extracted).0); + + // Different valid grouping: {0,4,5}=group 0, {1,2,3}=group 1 + // 4+6+5=15 and 5+6+4=15 + let lehmer2 = encode_lehmer(&[0, 4, 5, 6, 1, 2, 3]); + let extracted2 = reduction.extract_solution(&lehmer2); + assert_eq!(extracted2[0], 0); // element 0 in group 0 + assert_eq!(extracted2[4], 0); // element 4 in group 0 + assert_eq!(extracted2[5], 0); // element 5 in group 0 + assert_eq!(extracted2[1], 1); // element 1 in group 1 + assert_eq!(extracted2[2], 1); // element 2 in group 1 + assert_eq!(extracted2[3], 1); // element 3 in group 1 + assert!(source.evaluate(&extracted2).0); +} + +#[test] +fn test_threepartition_to_flowshopscheduling_dims() { + let (_source, reduction) = reduce_three_partition(vec![4, 5, 6, 4, 6, 5], 15); + let target = reduction.target_problem(); + + // Lehmer code dims: [7, 6, 5, 4, 3, 2, 1] + let dims = target.dims(); + assert_eq!(dims, vec![7, 6, 5, 4, 3, 2, 1]); +} + +#[test] +fn test_threepartition_to_flowshopscheduling_canonical_makespan() { + let (source, reduction) = reduce_three_partition(vec![4, 5, 6, 4, 6, 5], 15); + let target = reduction.target_problem(); + + // The canonical ordering should achieve exactly the deadline + let canonical_order = vec![0, 1, 2, 6, 3, 4, 5]; + let 
makespan = target.compute_makespan(&canonical_order); + assert_eq!(makespan, target.deadline()); + + // Verify the deadline computation: + // m=2, B=15, L=31 + // Canonical schedule on M2: first element starts at s(a_0)=4, + // group1 takes B=15, separator takes L=31, group2 takes B=15 + // M2 finishes at 4 + 15 + 31 + 15 = 65 + // M3 lags behind M2 by one element's processing time at the end + assert!(target.deadline() > 0); + + // The number of elements + groups - 1 should equal num_jobs + assert_eq!( + source.num_elements() + source.num_groups() - 1, + target.num_jobs() + ); +} diff --git a/src/unit_tests/rules/threepartition_jobshopscheduling.rs b/src/unit_tests/rules/threepartition_jobshopscheduling.rs new file mode 100644 index 00000000..2cbb12ab --- /dev/null +++ b/src/unit_tests/rules/threepartition_jobshopscheduling.rs @@ -0,0 +1,218 @@ +use super::*; +use crate::models::misc::ThreePartition; +use crate::rules::test_helpers::assert_satisfaction_round_trip_from_optimization_target; +use crate::traits::Problem; +use crate::types::Min; + +/// m=1: 3 elements, bound=15, sizes=[4, 5, 6]. Only 1 group, no separators. +/// 3 jobs, 6 tasks. dims = [3,2,1,3,2,1] => 36 configs. Fast for brute force. +#[test] +fn test_threepartition_to_jobshopscheduling_closed_loop() { + let source = ThreePartition::new(vec![4, 5, 6], 15); + let reduction = ReduceTo::::reduce_to(&source); + + assert_satisfaction_round_trip_from_optimization_target( + &source, + &reduction, + "ThreePartition -> JobShopScheduling closed loop (m=1)", + ); +} + +/// Verify the target problem structure for m=1. 
+#[test] +fn test_threepartition_to_jss_structure_m1() { + let source = ThreePartition::new(vec![4, 5, 6], 15); + let reduction = ReduceTo::::reduce_to(&source); + let target = reduction.target_problem(); + + // m=1: 3 element jobs, 0 separator jobs + assert_eq!(target.num_processors(), 2); + assert_eq!(target.num_jobs(), 3); + assert_eq!(target.num_tasks(), 6); + + // Each element job has 2 tasks + for (i, job) in target.jobs().iter().enumerate() { + assert_eq!(job.len(), 2, "element job {i} should have 2 tasks"); + assert_eq!( + job[0].0, 0, + "element job {i} task 0 should be on processor 0" + ); + assert_eq!( + job[1].0, 1, + "element job {i} task 1 should be on processor 1" + ); + // Tasks have equal length = source size + assert_eq!(job[0].1, job[1].1); + } + + let sizes = source.sizes(); + assert_eq!(target.jobs()[0][0].1, sizes[0]); + assert_eq!(target.jobs()[1][0].1, sizes[1]); + assert_eq!(target.jobs()[2][0].1, sizes[2]); +} + +/// Verify the target problem structure for m=2. +#[test] +fn test_threepartition_to_jss_structure_m2() { + // m=2: 6 elements, bound=20, sizes satisfy B/4 < s < B/2 + let source = ThreePartition::new(vec![6, 7, 7, 6, 8, 6], 20); + let reduction = ReduceTo::::reduce_to(&source); + let target = reduction.target_problem(); + + // m=2: 6 element jobs + 1 separator job = 7 jobs + assert_eq!(target.num_processors(), 2); + assert_eq!(target.num_jobs(), 7); // 6 + 2 - 1 + assert_eq!(target.num_tasks(), 13); // 2*6 + 2 - 1 + + // Element jobs (0..5): 2 tasks each + for i in 0..6 { + let job = &target.jobs()[i]; + assert_eq!(job.len(), 2); + assert_eq!(job[0].0, 0); + assert_eq!(job[1].0, 1); + } + + // Separator job (index 6): 1 task on processor 0 + let separator = &target.jobs()[6]; + assert_eq!(separator.len(), 1); + assert_eq!(separator[0].0, 0); + + // Separator length L = m*B + 1 = 2*20 + 1 = 41 + assert_eq!(separator[0].1, 41); +} + +/// Verify that the threshold is correct for m=2. 
+#[test] +fn test_threepartition_to_jss_threshold_m2() { + let source = ThreePartition::new(vec![6, 7, 7, 6, 8, 6], 20); + let reduction = ReduceTo::::reduce_to(&source); + + // D = m*B + (m-1)*L = 2*20 + 1*41 = 81 + assert_eq!(reduction.threshold(), 81); +} + +/// For m=2, manually construct a valid schedule config and verify extraction. +#[test] +fn test_threepartition_to_jss_extraction_m2() { + // sizes = [6, 7, 7, 6, 8, 6], bound = 20, m = 2 + // Valid partition: group 0 = {7, 7, 6} (indices 1,2,3), group 1 = {6, 8, 6} (indices 0,4,5) + let source = ThreePartition::new(vec![6, 7, 7, 6, 8, 6], 20); + let reduction = ReduceTo::::reduce_to(&source); + let target = reduction.target_problem(); + + // Machine 0 tasks (local indices 0..6): + // local 0 -> element 0 (task id 0) + // local 1 -> element 1 (task id 2) + // local 2 -> element 2 (task id 4) + // local 3 -> element 3 (task id 6) + // local 4 -> element 4 (task id 8) + // local 5 -> element 5 (task id 10) + // local 6 -> separator 0 (task id 12) + // + // We want machine 0 order: elem1, elem2, elem3, separator0, elem0, elem4, elem5 + // That's local indices: [1, 2, 3, 6, 0, 4, 5] + // + // Lehmer encoding of permutation [1, 2, 3, 6, 0, 4, 5]: + // available = [0,1,2,3,4,5,6] + // pick 1 from [0,1,2,3,4,5,6] -> index 1, remaining [0,2,3,4,5,6] + // pick 2 from [0,2,3,4,5,6] -> index 1, remaining [0,3,4,5,6] + // pick 3 from [0,3,4,5,6] -> index 1, remaining [0,4,5,6] + // pick 6 from [0,4,5,6] -> index 3, remaining [0,4,5] + // pick 0 from [0,4,5] -> index 0, remaining [4,5] + // pick 4 from [4,5] -> index 0, remaining [5] + // pick 5 from [5] -> index 0 + let machine0_lehmer = vec![1, 1, 1, 3, 0, 0, 0]; + + // Machine 1 tasks (local indices 0..5): + // local 0 -> element 0 (task id 1) + // local 1 -> element 1 (task id 3) + // local 2 -> element 2 (task id 5) + // local 3 -> element 3 (task id 7) + // local 4 -> element 4 (task id 9) + // local 5 -> element 5 (task id 11) + // + // Any valid ordering; use 
identity: [0,1,2,3,4,5] => Lehmer [0,0,0,0,0,0] + let machine1_lehmer = vec![0, 0, 0, 0, 0, 0]; + + let mut config = machine0_lehmer; + config.extend(machine1_lehmer); + + // Verify the schedule produces a valid makespan + let value = target.evaluate(&config); + assert!(value.0.is_some(), "config should produce a valid schedule"); + + // Extract and verify source solution + let source_config = reduction.extract_solution(&config); + assert_eq!(source_config.len(), 6); + + // Elements 1,2,3 should be in group 0 (before separator) + // Elements 0,4,5 should be in group 1 (after separator) + assert_eq!(source_config[1], 0); // element 1 in group 0 + assert_eq!(source_config[2], 0); // element 2 in group 0 + assert_eq!(source_config[3], 0); // element 3 in group 0 + assert_eq!(source_config[0], 1); // element 0 in group 1 + assert_eq!(source_config[4], 1); // element 4 in group 1 + assert_eq!(source_config[5], 1); // element 5 in group 1 + + // Verify the extracted solution is a valid 3-partition + let source_value = source.evaluate(&source_config); + assert!( + source_value.0, + "extracted solution should be a valid 3-partition" + ); +} + +/// For m=1, verify that optimal makespan equals the sum of all sizes. +#[test] +fn test_threepartition_to_jss_makespan_m1() { + let source = ThreePartition::new(vec![4, 5, 6], 15); + let reduction = ReduceTo::::reduce_to(&source); + let target = reduction.target_problem(); + + // With m=1, no separators, threshold = 1*15 + 0 = 15 + assert_eq!(reduction.threshold(), 15); + + // Identity ordering: Lehmer [0,0,0] for machine 0, [0,0,0] for machine 1 + let config = vec![0, 0, 0, 0, 0, 0]; + let value = target.evaluate(&config); + + // Tasks on machine 0: 4, 5, 6 (total 15) + // Tasks on machine 1: must wait for respective machine 0 tasks + // Machine 0: [0,4], [4,9], [9,15] + // Machine 1: [4,8], [9,14], [15,21] + // Makespan = 21 + assert_eq!(value, Min(Some(21))); +} + +/// Verify overhead expressions are correct. 
+#[test] +fn test_threepartition_to_jss_overhead() { + let source = ThreePartition::new(vec![4, 5, 6], 15); + let reduction = ReduceTo::::reduce_to(&source); + let target = reduction.target_problem(); + + // num_jobs = num_elements + num_groups - 1 = 3 + 1 - 1 = 3 + assert_eq!( + target.num_jobs(), + source.num_elements() + source.num_groups() - 1 + ); + // num_tasks = 2 * num_elements + num_groups - 1 = 6 + 0 = 6 + assert_eq!( + target.num_tasks(), + 2 * source.num_elements() + source.num_groups() - 1 + ); + + // Also check for m=2 + let source2 = ThreePartition::new(vec![6, 7, 7, 6, 8, 6], 20); + let reduction2 = ReduceTo::::reduce_to(&source2); + let target2 = reduction2.target_problem(); + + assert_eq!( + target2.num_jobs(), + source2.num_elements() + source2.num_groups() - 1 + ); + assert_eq!( + target2.num_tasks(), + 2 * source2.num_elements() + source2.num_groups() - 1 + ); +} diff --git a/src/unit_tests/rules/threepartition_resourceconstrainedscheduling.rs b/src/unit_tests/rules/threepartition_resourceconstrainedscheduling.rs new file mode 100644 index 00000000..dcb9990b --- /dev/null +++ b/src/unit_tests/rules/threepartition_resourceconstrainedscheduling.rs @@ -0,0 +1,126 @@ +use super::*; +use crate::models::misc::{ResourceConstrainedScheduling, ThreePartition}; +use crate::rules::test_helpers::assert_satisfaction_round_trip_from_satisfaction_target; +use crate::solvers::BruteForce; +use crate::traits::Problem; + +fn reduce_three_partition( + sizes: &[u64], + bound: u64, +) -> (ThreePartition, ReductionThreePartitionToRCS) { + let source = ThreePartition::new(sizes.to_vec(), bound); + let reduction = ReduceTo::::reduce_to(&source); + (source, reduction) +} + +fn assert_satisfiability_matches( + source: &ThreePartition, + target: &ResourceConstrainedScheduling, + expected: bool, +) { + let solver = BruteForce::new(); + assert_eq!(solver.find_witness(source).is_some(), expected); + assert_eq!(solver.find_witness(target).is_some(), expected); +} + +#[test] 
+fn test_threepartition_to_resourceconstrainedscheduling_closed_loop() { + // sizes [4, 5, 6, 4, 6, 5], B=15, m=2 + // partition: {4,5,6} and {4,6,5} + let (source, reduction) = reduce_three_partition(&[4, 5, 6, 4, 6, 5], 15); + + assert_satisfaction_round_trip_from_satisfaction_target( + &source, + &reduction, + "ThreePartition -> ResourceConstrainedScheduling closed loop", + ); +} + +#[test] +fn test_threepartition_to_resourceconstrainedscheduling_structure() { + let (source, reduction) = reduce_three_partition(&[4, 5, 6, 4, 6, 5], 15); + let target = reduction.target_problem(); + + assert_eq!(target.num_tasks(), 6); + assert_eq!(target.num_tasks(), source.num_elements()); + assert_eq!(target.num_processors(), 3); + assert_eq!(target.num_resources(), 1); + assert_eq!(target.resource_bounds(), &[15]); + assert_eq!(target.deadline(), 2); // m = 6/3 = 2 + + // Check resource requirements match sizes + for (i, req) in target.resource_requirements().iter().enumerate() { + assert_eq!(req.len(), 1); + assert_eq!(req[0], source.sizes()[i]); + } +} + +#[test] +fn test_threepartition_to_resourceconstrainedscheduling_solution_extraction() { + let (source, reduction) = reduce_three_partition(&[4, 5, 6, 4, 6, 5], 15); + let target = reduction.target_problem(); + + let solver = BruteForce::new(); + let target_solutions = solver.find_all_witnesses(target); + + for sol in &target_solutions { + let extracted = reduction.extract_solution(sol); + assert_eq!(extracted.len(), source.num_elements()); + let target_valid = target.evaluate(sol); + let source_valid = source.evaluate(&extracted); + if target_valid.0 { + assert!( + source_valid.0, + "Valid RCS solution should yield valid ThreePartition" + ); + } + } +} + +#[test] +fn test_threepartition_to_resourceconstrainedscheduling_single_triple() { + // m=1: sizes [4, 5, 6], B=15 + let (source, reduction) = reduce_three_partition(&[4, 5, 6], 15); + let target = reduction.target_problem(); + + assert_eq!(target.num_tasks(), 3); + 
assert_eq!(target.deadline(), 1); // m=1 + assert_eq!(target.num_processors(), 3); + + assert_satisfiability_matches(&source, target, true); + + assert_satisfaction_round_trip_from_satisfaction_target( + &source, + &reduction, + "ThreePartition -> RCS single triple", + ); +} + +#[test] +fn test_threepartition_to_resourceconstrainedscheduling_infeasible() { + // sizes [4, 4, 7, 4, 4, 7], B=15, m=2 + // Only valid grouping of triples summing to 15: {4, 4, 7} and {4, 4, 7} + // This is actually feasible. Let's pick something infeasible. + // sizes [5, 5, 5, 5, 5, 5], B=15, m=2 — all equal, any triple sums to 15. Feasible. + // + // For infeasibility within the 3-Partition constraints (B/4 < a_i < B/2), + // we need sum = m*B but no valid partition. With m=1, B=15: + // sizes [4, 4, 7] sums to 15 — feasible. + // Actually constructing an infeasible instance that satisfies B/4 < a_i < B/2 + // and sum = m*B is non-trivial for small instances. We test feasible cases + // and rely on the closed-loop test for correctness. 
+ // + // Test with m=2, B=21, sizes [6,7,8,6,7,8] sum=42=2*21 + // B/4=5.25, B/2=10.5, all sizes in (5.25, 10.5) ✓ + // Partition: {6,7,8}=21 and {6,7,8}=21 ✓ + let (source, reduction) = reduce_three_partition(&[6, 7, 8, 6, 7, 8], 21); + let target = reduction.target_problem(); + + assert_satisfiability_matches(&source, target, true); + + assert_satisfaction_round_trip_from_satisfaction_target( + &source, + &reduction, + "ThreePartition -> RCS two triples", + ); +} diff --git a/src/unit_tests/rules/threepartition_sequencingtominimizeweightedtardiness.rs b/src/unit_tests/rules/threepartition_sequencingtominimizeweightedtardiness.rs new file mode 100644 index 00000000..dca72e3f --- /dev/null +++ b/src/unit_tests/rules/threepartition_sequencingtominimizeweightedtardiness.rs @@ -0,0 +1,176 @@ +use super::*; +use crate::models::misc::{SequencingToMinimizeWeightedTardiness, ThreePartition}; +use crate::solvers::BruteForce; +use crate::traits::Problem; + +fn reduce(sizes: Vec, bound: u64) -> (ThreePartition, ReductionThreePartitionToSMWT) { + let source = ThreePartition::new(sizes, bound); + let reduction = ReduceTo::::reduce_to(&source); + (source, reduction) +} + +fn assert_satisfiability_matches( + source: &ThreePartition, + target: &SequencingToMinimizeWeightedTardiness, + expected: bool, +) { + let solver = BruteForce::new(); + assert_eq!( + solver.find_witness(source).is_some(), + expected, + "source satisfiability mismatch" + ); + assert_eq!( + solver.find_witness(target).is_some(), + expected, + "target satisfiability mismatch" + ); +} + +/// Verify the decision-level round trip: source is satisfiable iff target is, +/// and at least one target witness extracts to a valid source witness. 
+fn assert_decision_round_trip( + source: &ThreePartition, + reduction: &ReductionThreePartitionToSMWT, + context: &str, +) { + let solver = BruteForce::new(); + let target = reduction.target_problem(); + let source_sat = solver.find_witness(source).is_some(); + let target_witnesses = solver.find_all_witnesses(target); + let target_sat = !target_witnesses.is_empty(); + assert_eq!( + source_sat, target_sat, + "{context}: satisfiability mismatch (source={source_sat}, target={target_sat})" + ); + + if source_sat { + // At least one target witness must extract to a valid source witness + let found_valid = target_witnesses.iter().any(|tw| { + let extracted = reduction.extract_solution(tw); + source.evaluate(&extracted).0 + }); + assert!( + found_valid, + "{context}: no target witness extracted to a valid source solution" + ); + } +} + +#[test] +fn test_threepartition_to_sequencingtominimizeweightedtardiness_closed_loop() { + // m=2, B=20, sizes with B/4 < s < B/2 (i.e., 5 < s < 10) + // sizes: [7, 7, 6, 7, 7, 6], sum = 40 = 2*20 + // Valid partition: {7,7,6} and {7,7,6} + let (source, reduction) = reduce(vec![7, 7, 6, 7, 7, 6], 20); + + assert_decision_round_trip(&source, &reduction, "ThreePartition -> SMWT closed loop"); +} + +#[test] +fn test_threepartition_to_sequencingtominimizeweightedtardiness_structure() { + // m=2, B=20, 6 elements + 1 filler = 7 tasks + let (source, reduction) = reduce(vec![7, 7, 6, 7, 7, 6], 20); + let target = reduction.target_problem(); + + let m = source.num_groups(); + let b = source.bound(); + + // Total tasks: 3m + (m-1) = 6 + 1 = 7 + assert_eq!(target.num_tasks(), 7); + assert_eq!(target.num_tasks(), source.num_elements() + m - 1); + + // Element task lengths match source sizes + let lengths = target.lengths(); + for (len, &size) in lengths.iter().zip(source.sizes()) { + assert_eq!(*len, size); + } + + // Filler task length = 1 + for &len in &lengths[source.num_elements()..] 
{ + assert_eq!(len, 1); + } + + // Element task weights = 1 + let weights = target.weights(); + for &w in &weights[..source.num_elements()] { + assert_eq!(w, 1); + } + + // Filler task weight = mB + 1 + let filler_weight = (m as u64) * b + 1; + for &w in &weights[source.num_elements()..] { + assert_eq!(w, filler_weight); + } + + // Element task deadlines = mB + (m-1) = horizon + let horizon = (m as u64) * b + (m as u64 - 1); + let deadlines = target.deadlines(); + for &d in &deadlines[..source.num_elements()] { + assert_eq!(d, horizon); + } + + // Filler deadlines: (j+1)*B + (j+1) + for (j, &d) in deadlines[source.num_elements()..].iter().enumerate() { + let expected = ((j + 1) as u64) * b + (j + 1) as u64; + assert_eq!(d, expected); + } + + // Bound = 0 + assert_eq!(target.bound(), 0); +} + +#[test] +fn test_threepartition_to_sequencingtominimizeweightedtardiness_m1() { + // m=1, B=20, 3 elements, no fillers + // sizes: [7, 7, 6], sum = 20 = 1*20 + let (source, reduction) = reduce(vec![7, 7, 6], 20); + let target = reduction.target_problem(); + + // 3 element tasks, 0 filler tasks + assert_eq!(target.num_tasks(), 3); + assert_eq!(target.bound(), 0); + + // All deadlines = 1*20 + 0 = 20 + for &d in target.deadlines() { + assert_eq!(d, 20); + } + + // m=1: any permutation of 3 tasks should satisfy (sum = B, all fit by deadline) + assert_decision_round_trip(&source, &reduction, "ThreePartition -> SMWT m=1"); +} + +#[test] +fn test_threepartition_to_sequencingtominimizeweightedtardiness_solution_extraction() { + let (source, reduction) = reduce(vec![7, 7, 6, 7, 7, 6], 20); + let target = reduction.target_problem(); + + let solver = BruteForce::new(); + let target_solutions = solver.find_all_witnesses(target); + assert!(!target_solutions.is_empty(), "target should be satisfiable"); + + // Verify that at least one target solution extracts to a valid source solution + let mut found_valid = false; + for sol in &target_solutions { + let extracted = 
reduction.extract_solution(sol); + assert_eq!(extracted.len(), source.num_elements()); + if source.evaluate(&extracted).0 { + found_valid = true; + } + } + assert!( + found_valid, + "at least one extraction must yield a valid 3-partition" + ); +} + +#[test] +fn test_threepartition_to_sequencingtominimizeweightedtardiness_satisfiability_match() { + // Feasible instance: m=2, B=20 + let (source, reduction) = reduce(vec![7, 7, 6, 7, 7, 6], 20); + assert_satisfiability_matches(&source, reduction.target_problem(), true); + + // m=1: always feasible (3 elements sum to B) + let (source1, reduction1) = reduce(vec![7, 7, 6], 20); + assert_satisfiability_matches(&source1, reduction1.target_problem(), true); +} diff --git a/src/unit_tests/rules/threepartition_sequencingwithreleasetimesanddeadlines.rs b/src/unit_tests/rules/threepartition_sequencingwithreleasetimesanddeadlines.rs new file mode 100644 index 00000000..58deb0fa --- /dev/null +++ b/src/unit_tests/rules/threepartition_sequencingwithreleasetimesanddeadlines.rs @@ -0,0 +1,94 @@ +use super::*; +use crate::models::misc::{SequencingWithReleaseTimesAndDeadlines, ThreePartition}; +use crate::rules::test_helpers::assert_satisfaction_round_trip_from_satisfaction_target; +use crate::solvers::BruteForce; +use crate::traits::Problem; + +fn reduce(sizes: Vec<u64>, bound: u64) -> (ThreePartition, ReductionThreePartitionToSRTD) { + let source = ThreePartition::new(sizes, bound); + let reduction = ReduceTo::<SequencingWithReleaseTimesAndDeadlines>::reduce_to(&source); + (source, reduction) +} + +#[test] +fn test_threepartition_to_sequencingwithreleasetimesanddeadlines_closed_loop() { + // sizes=[4,5,6,4,6,5], bound=15, m=2 + // Valid partition: {4,5,6} and {4,6,5} + let (source, reduction) = reduce(vec![4, 5, 6, 4, 6, 5], 15); + + assert_satisfaction_round_trip_from_satisfaction_target( + &source, + &reduction, + "ThreePartition -> SequencingWithReleaseTimesAndDeadlines closed loop", + ); +} + +#[test] +fn
test_threepartition_to_sequencingwithreleasetimesanddeadlines_structure() { + let (source, reduction) = reduce(vec![4, 5, 6, 4, 6, 5], 15); + let target = reduction.target_problem(); + + // 3m = 6 element tasks + m-1 = 1 filler task = 7 total + assert_eq!(target.num_tasks(), 7); + assert_eq!(source.num_elements() + source.num_groups() - 1, 7); + + // Element tasks: lengths match source sizes + let lengths = target.lengths(); + assert_eq!(&lengths[..6], &[4, 5, 6, 4, 6, 5]); + // Filler task has length 1 + assert_eq!(lengths[6], 1); + + // Element tasks: release = 0, deadline = m*(B+1)-1 = 2*16-1 = 31 + for i in 0..6 { + assert_eq!(target.release_times()[i], 0); + assert_eq!(target.deadlines()[i], 31); + } + + // Filler task: release = 1*15+0 = 15, deadline = 16 + assert_eq!(target.release_times()[6], 15); + assert_eq!(target.deadlines()[6], 16); + + // Time horizon + assert_eq!(target.time_horizon(), 31); +} + +#[test] +fn test_threepartition_to_sequencingwithreleasetimesanddeadlines_satisfiability() { + let (source, reduction) = reduce(vec![4, 5, 6, 4, 6, 5], 15); + let target = reduction.target_problem(); + + let solver = BruteForce::new(); + // Source is satisfiable + assert!(solver.find_witness(&source).is_some()); + // Target should also be satisfiable + assert!(solver.find_witness(target).is_some()); +} + +#[test] +fn test_threepartition_to_sequencingwithreleasetimesanddeadlines_solution_extraction() { + let (source, reduction) = reduce(vec![4, 5, 6, 4, 6, 5], 15); + let target = reduction.target_problem(); + + let solver = BruteForce::new(); + let target_solutions = solver.find_all_witnesses(target); + + for sol in &target_solutions { + let extracted = reduction.extract_solution(sol); + assert_eq!(extracted.len(), source.num_elements()); + let source_valid = source.evaluate(&extracted); + assert!( + source_valid.0, + "Valid schedule should yield valid 3-partition" + ); + } +} + +#[test] +fn test_threepartition_to_sequencingwithreleasetimesanddeadlines_dims() 
{ + let (_, reduction) = reduce(vec![4, 5, 6, 4, 6, 5], 15); + let target = reduction.target_problem(); + + // 7 tasks -> Lehmer dims [7,6,5,4,3,2,1] + let dims = target.dims(); + assert_eq!(dims, vec![7, 6, 5, 4, 3, 2, 1]); +} From d2acc1cea118274eff3fb58b6c0e945dbaccd081 Mon Sep 17 00:00:00 2001 From: zazabap Date: Tue, 31 Mar 2026 09:08:56 +0000 Subject: [PATCH 2/3] fix: register 9 missing rules in mod.rs and address review findings - Register all 9 unregistered rule modules in rules/mod.rs (were dead code) - Wire 9 canonical rule example specs in canonical_rule_example_specs() - Add ExactCoverBy3Sets -> ILP to dominated-rules allow-list - Rename misleading test_..._infeasible to test_..._two_triples - Fix unused variable warning in JobShopScheduling example builder Test count: 4258 -> 4312 (54 previously-dead tests now compiled and passing) Co-Authored-By: Claude Opus 4.6 (1M context) --- src/rules/mod.rs | 21 +++++++++++++++++++ src/rules/threepartition_jobshopscheduling.rs | 2 +- src/unit_tests/rules/analysis.rs | 2 ++ ...partition_resourceconstrainedscheduling.rs | 2 +- 4 files changed, 25 insertions(+), 2 deletions(-) diff --git a/src/rules/mod.rs b/src/rules/mod.rs index f33644ba..fabfa8c2 100644 --- a/src/rules/mod.rs +++ b/src/rules/mod.rs @@ -11,6 +11,7 @@ pub use registry::{EdgeCapabilities, ReductionEntry, ReductionOverhead}; pub(crate) mod circuit_spinglass; mod closestvectorproblem_qubo; pub(crate) mod coloring_qubo; +pub(crate) mod exactcoverby3sets_maximumsetpacking; pub(crate) mod exactcoverby3sets_staffscheduling; pub(crate) mod factoring_circuit; mod graph; @@ -26,6 +27,7 @@ pub(crate) mod hamiltoniancircuit_strongconnectivityaugmentation; pub(crate) mod hamiltoniancircuit_travelingsalesman; pub(crate) mod hamiltonianpath_consecutiveonessubmatrix; pub(crate) mod hamiltonianpath_isomorphicspanningtree; +pub(crate) mod ilp_i32_ilp_bool; pub(crate) mod kclique_balancedcompletebipartitesubgraph; pub(crate) mod kclique_conjunctivebooleanquery; 
pub(crate) mod kclique_subgraphisomorphism; @@ -38,6 +40,7 @@ pub(crate) mod ksatisfiability_minimumvertexcover; pub(crate) mod ksatisfiability_qubo; pub(crate) mod ksatisfiability_subsetsum; pub(crate) mod longestcommonsubsequence_maximumindependentset; +pub(crate) mod maxcut_minimumcutintoboundedsets; pub(crate) mod maximumclique_maximumindependentset; mod maximumindependentset_casts; mod maximumindependentset_gridgraph; @@ -57,6 +60,7 @@ pub(crate) mod minimumvertexcover_minimumhittingset; pub(crate) mod minimumvertexcover_minimumsetcovering; pub(crate) mod naesatisfiability_maxcut; pub(crate) mod paintshop_qubo; +pub(crate) mod partition_binpacking; pub(crate) mod partition_cosineproductintegration; pub(crate) mod partition_knapsack; pub(crate) mod partition_multiprocessorscheduling; @@ -78,6 +82,11 @@ pub(crate) mod subsetsum_capacityassignment; pub(crate) mod subsetsum_closestvectorproblem; #[cfg(test)] pub(crate) mod test_helpers; +pub(crate) mod threepartition_flowshopscheduling; +pub(crate) mod threepartition_jobshopscheduling; +pub(crate) mod threepartition_resourceconstrainedscheduling; +pub(crate) mod threepartition_sequencingtominimizeweightedtardiness; +pub(crate) mod threepartition_sequencingwithreleasetimesanddeadlines; mod traits; pub(crate) mod travelingsalesman_qubo; @@ -332,6 +341,18 @@ pub(crate) fn canonical_rule_example_specs() -> Vec Vec 3 jobs, 6 tasks // All elements go to group 0: config = [0, 0, 0] let source = ThreePartition::new(vec![4, 5, 6], 15); - let reduction = ReduceTo::<JobShopScheduling>::reduce_to(&source); + let _reduction = ReduceTo::<JobShopScheduling>::reduce_to(&source); // For m=1, any ordering works. Use identity ordering on both machines.
// Machine 0: 3 tasks => Lehmer [0, 0, 0] diff --git a/src/unit_tests/rules/analysis.rs b/src/unit_tests/rules/analysis.rs index 73da4b26..a12dd4a2 100644 --- a/src/unit_tests/rules/analysis.rs +++ b/src/unit_tests/rules/analysis.rs @@ -259,6 +259,8 @@ fn test_find_dominated_rules_returns_known_set() { "MaximumMatching {graph: \"SimpleGraph\", weight: \"i32\"}", "ILP {variable: \"bool\"}", ), + // ExactCoverBy3Sets → MaxSetPacking → ILP is better than direct ExactCoverBy3Sets → ILP + ("ExactCoverBy3Sets", "ILP {variable: \"bool\"}"), ] .into_iter() .collect(); diff --git a/src/unit_tests/rules/threepartition_resourceconstrainedscheduling.rs b/src/unit_tests/rules/threepartition_resourceconstrainedscheduling.rs index dcb9990b..e33249f7 100644 --- a/src/unit_tests/rules/threepartition_resourceconstrainedscheduling.rs +++ b/src/unit_tests/rules/threepartition_resourceconstrainedscheduling.rs @@ -97,7 +97,7 @@ fn test_threepartition_to_resourceconstrainedscheduling_single_triple() { } #[test] -fn test_threepartition_to_resourceconstrainedscheduling_infeasible() { +fn test_threepartition_to_resourceconstrainedscheduling_two_triples() { // sizes [4, 4, 7, 4, 4, 7], B=15, m=2 // Only valid grouping of triples summing to 15: {4, 4, 7} and {4, 4, 7} // This is actually feasible. Let's pick something infeasible. From f67e6ecea0cf523f7ccee7b0d63001b2b0207718 Mon Sep 17 00:00:00 2001 From: zazabap Date: Tue, 31 Mar 2026 09:38:11 +0000 Subject: [PATCH 3/3] fix: use MinimizeStepsThenOverhead in MaxCut->QUBO parity test The new reduction rules (especially MaxCut -> MinCutBounded) created an alternative 3-step path to QUBO via complete graph construction, producing O(n^4) QUBO variables. MinimizeSteps tied at 3 steps and the tie-breaker selected this expensive path. Using MinimizeStepsThenOverhead with actual problem size (Petersen: 10v, 15e) ensures the compact SpinGlass path is chosen. 
Co-Authored-By: Claude Opus 4.6 (1M context) --- src/unit_tests/rules/reduction_path_parity.rs | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/unit_tests/rules/reduction_path_parity.rs b/src/unit_tests/rules/reduction_path_parity.rs index f86c1ec1..9a772159 100644 --- a/src/unit_tests/rules/reduction_path_parity.rs +++ b/src/unit_tests/rules/reduction_path_parity.rs @@ -6,7 +6,7 @@ use crate::models::algebraic::QUBO; use crate::models::graph::{MaxCut, SpinGlass}; use crate::models::misc::Factoring; use crate::rules::test_helpers::assert_optimization_round_trip_chain; -use crate::rules::{MinimizeSteps, ReductionGraph}; +use crate::rules::{MinimizeSteps, MinimizeStepsThenOverhead, ReductionGraph}; use crate::solvers::BruteForce; use crate::topology::SimpleGraph; use crate::traits::Problem; @@ -73,14 +73,15 @@ fn test_jl_parity_maxcut_to_qubo_path() { let graph = ReductionGraph::new(); let src_var = ReductionGraph::variant_to_map(&MaxCut::<SimpleGraph>::variant()); let dst_var = ReductionGraph::variant_to_map(&QUBO::<f64>::variant()); + // Use Petersen graph size to pick the path with smallest output let rpath = graph .find_cheapest_path( "MaxCut", &src_var, "QUBO", &dst_var, - &ProblemSize::new(vec![]), - &MinimizeSteps, + &ProblemSize::new(vec![("num_vertices", 10), ("num_edges", 15)]), + &MinimizeStepsThenOverhead, ) .expect("Should find path MaxCut -> QUBO");