From e23d47670b7fcdb81a619f1f84ddce175bd21653 Mon Sep 17 00:00:00 2001 From: Paul Romano Date: Thu, 31 Jul 2025 12:47:53 +0700 Subject: [PATCH 01/14] Implement Model.keff_search method --- openmc/model/model.py | 102 +++++++++++++++++++++++++++++++++++++++++- 1 file changed, 101 insertions(+), 1 deletion(-) diff --git a/openmc/model/model.py b/openmc/model/model.py index 03fdcda1fc8..46ef9cd254e 100644 --- a/openmc/model/model.py +++ b/openmc/model/model.py @@ -1,5 +1,5 @@ from __future__ import annotations -from collections.abc import Iterable, Sequence +from collections.abc import Callable, Iterable, Sequence import copy from functools import lru_cache from pathlib import Path @@ -8,11 +8,13 @@ import random import re from tempfile import NamedTemporaryFile, TemporaryDirectory +from typing import Any, Protocol import warnings import h5py import lxml.etree as ET import numpy as np +import scipy.optimize as sopt import openmc import openmc._xml as xml @@ -24,6 +26,12 @@ from openmc.utility_funcs import change_directory +# Protocol for a function that is passed to search_keff +class ModelModifier(Protocol): + def __call__(self, val: float, **kwargs: Any) -> None: + ... + + class Model: """Model container. @@ -2151,3 +2159,95 @@ def _replace_infinity(value): # Take a wild guess as to how many rays are needed self.settings.particles = 2 * int(max_length) + + + def keff_search( + self, + func: ModelModifier, + target: float = 1.0, + method: str | None = None, + print_iterations: bool = False, + func_kwargs: dict[str, Any] | None = None, + run_kwargs: dict[str, Any] | None = None, + **kwargs + ): + """Perform a keff search on a model parametrized by a single variable. + + Parameters + ---------- + func : ModelModifier + Function that takes the parameter to be searched and makes a + modification to the model. + target : float, optional + keff value to search for + method : str, optional + Method to use for the root finding. 
+ print_iterations : bool, optional + Whether or not to print the guess and the resultant keff during the + iteration process. + func_kwargs : dict, optional + Keyword-based arguments to pass to the `func` function. + run_kwargs : dict, optional + Keyword arguments to pass to :meth:`openmc.Model.run`. + + .. versionadded:: 0.13.1 + **kwargs + All remaining keyword arguments are passed to the root-finding + method. + + Returns + ------- + zero_value : float + Estimated value of the variable parameter where keff is the targeted + value + guesses : List of Real + List of guesses attempted by the search + results : List of 2-tuple of Real + List of keffs and uncertainties corresponding to the guess attempted + by the search + + """ + + check_type('model modifier', func, Callable) + check_type('target', target, Real) + func_kwargs = {} if func_kwargs is None else dict(func_kwargs) + run_kwargs = {} if run_kwargs is None else dict(run_kwargs) + run_kwargs.setdefault('output', False) + + # Set the iteration data storage variables + guesses = [] + results = [] + + # Define the function to be passed to root_scalar + def f(guess: float) -> float: + # Modify the model with the current guess + func(guess, **func_kwargs) + + # Run the model and obtain keff + sp_filepath = self.run(**run_kwargs) + with openmc.StatePoint(sp_filepath) as sp: + keff = sp.keff + + # Record the history + guesses.append(guess) + results.append(keff) + + if print_iterations: + print(f'Iteration: {len(guesses)}; {guess=:.2e}, {keff=:.5f}') + + return keff.n - target + + # Set arguments to be passed to root_scalar. 
Note that 'args' are extra + # arguments passed to _search_keff (does not include the 'guess' since that + # is handled by root_scalar) + kwargs['f'] = f + kwargs['args'] = (self, target, func, func_kwargs, print_iterations, + run_kwargs, guesses, results) + kwargs.setdefault('method', method) + + # Perform the search in a temporary directrory + with TemporaryDirectory() as tmpdir: + run_kwargs.setdefault('cwd', tmpdir) + sol = sopt.root_scalar(**kwargs) + + return sol.root, guesses, results From 9ab24db19da20a95a1f5a06b89ca863c00db3c1d Mon Sep 17 00:00:00 2001 From: Paul Romano Date: Sat, 9 Aug 2025 18:48:31 +0700 Subject: [PATCH 02/14] Implement GRsecant algorithm --- openmc/model/model.py | 144 +++++++++++++++++++++++++++++++++++++----- 1 file changed, 129 insertions(+), 15 deletions(-) diff --git a/openmc/model/model.py b/openmc/model/model.py index 46ef9cd254e..a674ca593c8 100644 --- a/openmc/model/model.py +++ b/openmc/model/model.py @@ -1,6 +1,7 @@ from __future__ import annotations from collections.abc import Callable, Iterable, Sequence import copy +from dataclasses import dataclass from functools import lru_cache from pathlib import Path import math @@ -14,7 +15,7 @@ import h5py import lxml.etree as ET import numpy as np -import scipy.optimize as sopt +from scipy.optimize import curve_fit import openmc import openmc._xml as xml @@ -2165,7 +2166,6 @@ def keff_search( self, func: ModelModifier, target: float = 1.0, - method: str | None = None, print_iterations: bool = False, func_kwargs: dict[str, Any] | None = None, run_kwargs: dict[str, Any] | None = None, @@ -2180,8 +2180,6 @@ def keff_search( modification to the model. target : float, optional keff value to search for - method : str, optional - Method to use for the root finding. print_iterations : bool, optional Whether or not to print the guess and the resultant keff during the iteration process. 
@@ -2219,10 +2217,13 @@ def keff_search( results = [] # Define the function to be passed to root_scalar - def f(guess: float) -> float: + def f(guess: float, generations: int) -> float: # Modify the model with the current guess func(guess, **func_kwargs) + # Change the number of batches + self.settings.batches = self.settings.inactive + generations + # Run the model and obtain keff sp_filepath = self.run(**run_kwargs) with openmc.StatePoint(sp_filepath) as sp: @@ -2233,21 +2234,134 @@ def f(guess: float) -> float: results.append(keff) if print_iterations: - print(f'Iteration: {len(guesses)}; {guess=:.2e}, {keff=:.5f}') + print(f'Iteration: {len(guesses)}; {generations=}, {guess=:.2e}, {keff=:.5f}') - return keff.n - target + return keff.n - target, keff.s - # Set arguments to be passed to root_scalar. Note that 'args' are extra - # arguments passed to _search_keff (does not include the 'guess' since that - # is handled by root_scalar) + # Set arguments to be passed to root_scalar_grsecant kwargs['f'] = f - kwargs['args'] = (self, target, func, func_kwargs, print_iterations, - run_kwargs, guesses, results) - kwargs.setdefault('method', method) + kwargs.setdefault('g0', self.settings.batches - self.settings.inactive) - # Perform the search in a temporary directrory + # Perform the search in a temporary directory with TemporaryDirectory() as tmpdir: run_kwargs.setdefault('cwd', tmpdir) - sol = sopt.root_scalar(**kwargs) + sol = root_scalar_grsecant(**kwargs) return sol.root, guesses, results + + +@dataclass +class RootResult: + root: float + converged: bool + iterations: int + function_calls: int + flag: str + + +def root_scalar_grsecant( + f, + x0: float, + x1: float, + memory: int = 4, + k_tol: float = 1e-4, + sigma_final: float = 3e-4, + p: float = 0.5, + sigma_factor: float = 0.95, + g0: int = 200, + g1: int = 200, + min_g: int = 20, + max_g: int | None = None, + x_min: float | None = None, + x_max: float | None = None, + maxiter: int = 50, + **kwargs: Any +) -> 
RootResult: + """ + GRsecant with MC-aware uncertainty/effort selection. + + fun(x, model, **mc_kwargs) -> (mean, std) + The solver passes {generations_kw: g} on each call. + """ + if memory < 2: + raise ValueError("memory must be ≥ 2") + + xs, fs, ss, gs = [], [], [], [] + + def eval_at(x: float, g: int): + mc_kwargs = dict(kwargs) + mc_kwargs["generations"] = int(g) + f_mean, f_std = f(x, **mc_kwargs) + xs.append(float(x)) + fs.append(float(f_mean)) + ss.append(float(f_std)) + gs.append(int(g)) + return fs[-1], ss[-1] + + # ---- Seed with two evaluations + f0, s0 = eval_at(x0, g0) + if abs(f0) <= k_tol and s0 <= sigma_final: + return RootResult(x0, True, 0, 1, "converged(init)") + f1, s1 = eval_at(x1, g1) + if abs(f1) <= k_tol and s1 <= sigma_final: + return RootResult(x1, True, 0, 2, "converged(init)") + + for it in range(maxiter): + # ---------- Step 1: propose next x via GRsecant + m = min(memory, len(xs)) + + # Perform a curve fit on f(x) = a + bx accounting for uncertainties. + # This is equivalent to minimizing the function in Equation (A.14) + (a, b), _ = curve_fit( + lambda x, a, b: a + b*x, + xs[-m:], fs[-m:], sigma=ss[-m:], absolute_sigma=True + ) + x_new = float(-a / b) + + # Clamp x_new to the bounds if provided + if x_min is not None: + x_new = max(x_new, x_min) + if x_max is not None: + x_new = min(x_new, x_max) + + # ---------- Step 2: choose target σ for next run (Eq. 8 + clamp) + + min_abs_f = float(np.min(np.abs(fs))) + base = sigma_factor * sigma_final + ratio = min_abs_f / k_tol if k_tol > 0 else 1.0 + sig = base * (ratio ** p) + sig_target = max(sig, base) + + # ---------- Step 3: choose generations to hit σ_target (Appendix C) + + # Use at least two past points for regression; otherwise, assume r≈0.5 + if len(gs) >= 4 and np.var(np.log(gs)) > 0.0: + # Perform a curve fit on ln(sigma) = ln(k) - r*ln(g) to solve for + # ln(k) and r. 
+ popt, _ = curve_fit( + lambda ln_g, ln_k, r: ln_k - r*ln_g, + np.log(gs), np.log(ss), absolute_sigma=False + ) + ln_k, r = popt + k = float(np.exp(ln_k)) + else: + r = 0.5 + k = float(ss[-1] * gs[-1]**r) + + g_new = (k / sig_target) ** (1 / r) + + # Clamp and round up to integer + g_new = max(min_g, math.ceil(g_new)) + if max_g is not None: + g_new = min(g_new, max_g) + + # Evaluate at proposed x with chosen effort + f_new, s_new = eval_at(x_new, g_new) + + #print(f'param_k={k}, {r=}, {sig_target=}, {g_new=}, {f_new=}, {s_new=}') + + # Termination: both criteria (|f| and σ) as in the paper + if (abs(f_new) <= k_tol and s_new <= sigma_final): + return RootResult(x_new, True, it + 1, len(xs), "converged") + + return RootResult(xs[-1], False, maxiter, len(xs), "maxiter") From b7d4ef83a1ca17bac1a59ce52eac073fadca704b Mon Sep 17 00:00:00 2001 From: Paul Romano Date: Mon, 11 Aug 2025 11:21:52 +0700 Subject: [PATCH 03/14] Determine number of generations based on r=0.5 --- openmc/model/model.py | 31 ++++++++++++++++++------------- 1 file changed, 18 insertions(+), 13 deletions(-) diff --git a/openmc/model/model.py b/openmc/model/model.py index a674ca593c8..1291b293d2a 100644 --- a/openmc/model/model.py +++ b/openmc/model/model.py @@ -2259,6 +2259,14 @@ class RootResult: flag: str +# TODO: Consistent use of bracket vs x0, x1 +# TODO: Account for min/max search interval (vs initial points) +# TODO: Allow for passing a single x0 value? +# TODO: Clean up use of g0, g1 +# TODO: Reporting total number of generations? +# TODO: Fit on sigam vs g is done with last 4 points. Change this? +# TODO: Change order of attributes in RootResult / change return datatype? 
+ def root_scalar_grsecant( f, x0: float, @@ -2334,21 +2342,20 @@ def eval_at(x: float, g: int): # ---------- Step 3: choose generations to hit σ_target (Appendix C) - # Use at least two past points for regression; otherwise, assume r≈0.5 - if len(gs) >= 4 and np.var(np.log(gs)) > 0.0: - # Perform a curve fit on ln(sigma) = ln(k) - r*ln(g) to solve for - # ln(k) and r. - popt, _ = curve_fit( - lambda ln_g, ln_k, r: ln_k - r*ln_g, - np.log(gs), np.log(ss), absolute_sigma=False + # Use at least two past points for regression + if len(gs) >= 2 and np.var(np.log(gs)) > 0.0: + # Perform a curve fit based on Eq. (C.3) to solve for ln(k). Note + # that unlike the paper, we do not leave r as an undetermined + # parameter and choose r=0.5. + (ln_k,), _ = curve_fit( + lambda ln_g, ln_k: ln_k - 0.5*ln_g, + np.log(gs[-4:]), np.log(ss[-4:]), ) - ln_k, r = popt k = float(np.exp(ln_k)) else: - r = 0.5 - k = float(ss[-1] * gs[-1]**r) + k = float(ss[-1] * math.sqrt(gs[-1])) - g_new = (k / sig_target) ** (1 / r) + g_new = (k / sig_target) ** 2 # Clamp and round up to integer g_new = max(min_g, math.ceil(g_new)) @@ -2358,8 +2365,6 @@ def eval_at(x: float, g: int): # Evaluate at proposed x with chosen effort f_new, s_new = eval_at(x_new, g_new) - #print(f'param_k={k}, {r=}, {sig_target=}, {g_new=}, {f_new=}, {s_new=}') - # Termination: both criteria (|f| and σ) as in the paper if (abs(f_new) <= k_tol and s_new <= sigma_final): return RootResult(x_new, True, it + 1, len(xs), "converged") From d48358ef090ad5ac576c25b52c44238f51f35261 Mon Sep 17 00:00:00 2001 From: Paul Romano Date: Mon, 11 Aug 2025 11:22:41 +0700 Subject: [PATCH 04/14] Add one more TODO --- openmc/model/model.py | 1 + 1 file changed, 1 insertion(+) diff --git a/openmc/model/model.py b/openmc/model/model.py index 1291b293d2a..86304f10397 100644 --- a/openmc/model/model.py +++ b/openmc/model/model.py @@ -2266,6 +2266,7 @@ class RootResult: # TODO: Reporting total number of generations? 
# TODO: Fit on sigam vs g is done with last 4 points. Change this? # TODO: Change order of attributes in RootResult / change return datatype? +# TODO: Support use of openmc.lib def root_scalar_grsecant( f, From 705b37d4a58575df05c492bbe34034dfde3510fe Mon Sep 17 00:00:00 2001 From: Paul Romano Date: Wed, 10 Sep 2025 21:20:58 -0500 Subject: [PATCH 05/14] Lots of updates to keff_search and grsecant --- openmc/model/model.py | 182 ++++++++++++++++++++++++++++-------------- 1 file changed, 122 insertions(+), 60 deletions(-) diff --git a/openmc/model/model.py b/openmc/model/model.py index 86304f10397..85828df6de0 100644 --- a/openmc/model/model.py +++ b/openmc/model/model.py @@ -1,7 +1,7 @@ from __future__ import annotations from collections.abc import Callable, Iterable, Sequence import copy -from dataclasses import dataclass +from dataclasses import dataclass, field from functools import lru_cache from pathlib import Path import math @@ -2161,16 +2161,17 @@ def _replace_infinity(value): # Take a wild guess as to how many rays are needed self.settings.particles = 2 * int(max_length) - def keff_search( self, func: ModelModifier, + x0: float, + x1: float, target: float = 1.0, print_iterations: bool = False, func_kwargs: dict[str, Any] | None = None, run_kwargs: dict[str, Any] | None = None, **kwargs - ): + ) -> SearchResult: """Perform a keff search on a model parametrized by a single variable. Parameters @@ -2178,6 +2179,10 @@ def keff_search( func : ModelModifier Function that takes the parameter to be searched and makes a modification to the model. + x0 : float + First guess for the parameter passed to `func` + x1 : float + Second guess for the parameter passed to `func` target : float, optional keff value to search for print_iterations : bool, optional @@ -2187,11 +2192,9 @@ def keff_search( Keyword-based arguments to pass to the `func` function. run_kwargs : dict, optional Keyword arguments to pass to :meth:`openmc.Model.run`. - - .. 
versionadded:: 0.13.1 **kwargs - All remaining keyword arguments are passed to the root-finding - method. + All remaining keyword arguments are passed to + :func:`grsecant`. Returns ------- @@ -2212,91 +2215,150 @@ def keff_search( run_kwargs = {} if run_kwargs is None else dict(run_kwargs) run_kwargs.setdefault('output', False) - # Set the iteration data storage variables - guesses = [] - results = [] - - # Define the function to be passed to root_scalar - def f(guess: float, generations: int) -> float: + # Define the function to be passed to grsecant + def f(guess: float, batches: int) -> float: # Modify the model with the current guess func(guess, **func_kwargs) # Change the number of batches - self.settings.batches = self.settings.inactive + generations + self.settings.batches = self.settings.inactive + batches # Run the model and obtain keff sp_filepath = self.run(**run_kwargs) with openmc.StatePoint(sp_filepath) as sp: keff = sp.keff - # Record the history - guesses.append(guess) - results.append(keff) - if print_iterations: - print(f'Iteration: {len(guesses)}; {generations=}, {guess=:.2e}, {keff=:.5f}') + print(f'{batches=}, {guess=:.2e}, {keff=:.5f}') return keff.n - target, keff.s - # Set arguments to be passed to root_scalar_grsecant + # Set arguments to be passed to grsecant kwargs['f'] = f - kwargs.setdefault('g0', self.settings.batches - self.settings.inactive) + kwargs['x0'] = x0 + kwargs['x1'] = x1 + kwargs.setdefault('b0', self.settings.batches - self.settings.inactive) + kwargs.setdefault('b1', self.settings.batches - self.settings.inactive) # Perform the search in a temporary directory with TemporaryDirectory() as tmpdir: run_kwargs.setdefault('cwd', tmpdir) - sol = root_scalar_grsecant(**kwargs) - - return sol.root, guesses, results + return grsecant(**kwargs) @dataclass -class RootResult: +class SearchResult: + """Result of a GRsecant keff search. 
+ + Attributes + ---------- + root : float + Estimated parameter value where f(x) = 0 at termination. + parameters : list[float] + Parameter values (x) evaluated during the search, in order. + keffs : list[float] + Estimated keff values for each evaluation. + stdevs : list[float] + One-sigma uncertainties of keff for each evaluation. + batches : list[int] + Number of active batches used for each evaluation. + converged : bool + Whether both |f| <= k_tol and sigma <= sigma_final were met. + flag : str + Reason for termination (e.g., "converged", "maxiter"). + """ root: float + parameters: list[float] = field(repr=False) + means: list[float] = field(repr=False) + stdevs: list[float] = field(repr=False) + batches: list[int] = field(repr=False) converged: bool - iterations: int - function_calls: int flag: str + @property + def function_calls(self) -> int: + """Number of function evaluations performed.""" + return len(self.parameters) + + @property + def total_batches(self) -> int: + """Total number of active batches used across all evaluations.""" + return sum(self.batches) + + -# TODO: Consistent use of bracket vs x0, x1 -# TODO: Account for min/max search interval (vs initial points) # TODO: Allow for passing a single x0 value? -# TODO: Clean up use of g0, g1 -# TODO: Reporting total number of generations? -# TODO: Fit on sigam vs g is done with last 4 points. Change this? -# TODO: Change order of attributes in RootResult / change return datatype? +# TODO: Fit on sigma vs b is done with last 4 points. Change this? 
# TODO: Support use of openmc.lib -def root_scalar_grsecant( +def grsecant( f, x0: float, x1: float, + x_min: float | None = None, + x_max: float | None = None, + b0: int = 200, + b1: int = 200, + b_min: int = 20, + b_max: int | None = None, memory: int = 4, k_tol: float = 1e-4, sigma_final: float = 3e-4, p: float = 0.5, sigma_factor: float = 0.95, - g0: int = 200, - g1: int = 200, - min_g: int = 20, - max_g: int | None = None, - x_min: float | None = None, - x_max: float | None = None, maxiter: int = 50, **kwargs: Any -) -> RootResult: - """ - GRsecant with MC-aware uncertainty/effort selection. +) -> SearchResult: + """GRsecant with MC-aware uncertainty/effort selection. + + Parameters + ---------- + f : function + Function that takes a parameter and returns the mean and standard + deviation. + x0 : float + First guess for the parameter passed to `f` + x1 : float + Second guess for the parameter passed to `f` + x_min : float + Minimum value for x. + x_max : float + Maximum value for x. + b0 : int + Number of batches to use in first function evaluation. + b1 : int + Number of batches to use in second function evaluation. + b_min : int + Minimum number of batches to use in a given function evaluation. + b_max : int + Maximum number of batches to use in a given function evaluation. + memory : int + Number of points to use in curve fit of f for predicting the next point. + k_tol : float + Stopping criterion on function value; value must be within k_tol of zero + to be accepted. + sigma_final : float + Maximum accepted uncertainty in f(x) for Eq. 8 stopping criterion. + p : float + Exponent in Eq. 8 stopping criteria. + sigma_factor : float + Multiplicative factor in Eq. 8 stopping criteria. + maxiter : int + Maximum number of iterations to perform. + **kwargs + Additional keyword arguments to pass to `f`. - fun(x, model, **mc_kwargs) -> (mean, std) - The solver passes {generations_kw: g} on each call. 
""" if memory < 2: raise ValueError("memory must be ≥ 2") - xs, fs, ss, gs = [], [], [], [] + # Create lists to store the history of evaluations + xs = [] + fs = [] + ss = [] + gs = [] + # Helper function to evaluate f and store results def eval_at(x: float, g: int): mc_kwargs = dict(kwargs) mc_kwargs["generations"] = int(g) @@ -2308,12 +2370,12 @@ def eval_at(x: float, g: int): return fs[-1], ss[-1] # ---- Seed with two evaluations - f0, s0 = eval_at(x0, g0) + f0, s0 = eval_at(x0, b0) if abs(f0) <= k_tol and s0 <= sigma_final: - return RootResult(x0, True, 0, 1, "converged(init)") - f1, s1 = eval_at(x1, g1) + return SearchResult(x0, xs, fs, ss, gs, True, "converged") + f1, s1 = eval_at(x1, b1) if abs(f1) <= k_tol and s1 <= sigma_final: - return RootResult(x1, True, 0, 2, "converged(init)") + return SearchResult(x1, xs, fs, ss, gs, True, "converged") for it in range(maxiter): # ---------- Step 1: propose next x via GRsecant @@ -2346,28 +2408,28 @@ def eval_at(x: float, g: int): # Use at least two past points for regression if len(gs) >= 2 and np.var(np.log(gs)) > 0.0: # Perform a curve fit based on Eq. (C.3) to solve for ln(k). Note - # that unlike the paper, we do not leave r as an undetermined + # that unlike in the paper, we do not leave r as an undetermined # parameter and choose r=0.5. 
(ln_k,), _ = curve_fit( - lambda ln_g, ln_k: ln_k - 0.5*ln_g, + lambda ln_b, ln_k: ln_k - 0.5*ln_b, np.log(gs[-4:]), np.log(ss[-4:]), ) k = float(np.exp(ln_k)) else: k = float(ss[-1] * math.sqrt(gs[-1])) - g_new = (k / sig_target) ** 2 + b_new = (k / sig_target) ** 2 # Clamp and round up to integer - g_new = max(min_g, math.ceil(g_new)) - if max_g is not None: - g_new = min(g_new, max_g) + b_new = max(b_min, math.ceil(b_new)) + if b_max is not None: + b_new = min(b_new, b_max) # Evaluate at proposed x with chosen effort - f_new, s_new = eval_at(x_new, g_new) + f_new, s_new = eval_at(x_new, b_new) # Termination: both criteria (|f| and σ) as in the paper - if (abs(f_new) <= k_tol and s_new <= sigma_final): - return RootResult(x_new, True, it + 1, len(xs), "converged") + if abs(f_new) <= k_tol and s_new <= sigma_final: + return SearchResult(x_new, xs, fs, ss, gs, True, "converged") - return RootResult(xs[-1], False, maxiter, len(xs), "maxiter") + return SearchResult(xs[-1], xs, fs, ss, gs, False, "maxiter") From eec0f9418813f7e624b28ec57e014d2c09bfc80a Mon Sep 17 00:00:00 2001 From: Paul Romano Date: Wed, 10 Sep 2025 22:54:31 -0500 Subject: [PATCH 06/14] Inline grsecant in Model.keff_search --- openmc/model/model.py | 316 +++++++++++++++++++----------------------- 1 file changed, 142 insertions(+), 174 deletions(-) diff --git a/openmc/model/model.py b/openmc/model/model.py index 85828df6de0..159555e2d73 100644 --- a/openmc/model/model.py +++ b/openmc/model/model.py @@ -2167,13 +2167,26 @@ def keff_search( x0: float, x1: float, target: float = 1.0, + k_tol: float = 1e-4, + sigma_final: float = 3e-4, + p: float = 0.5, + sigma_factor: float = 0.95, + memory: int = 4, + x_min: float | None = None, + x_max: float | None = None, + b0: int | None = None, + b1: int | None = None, + b_min: int = 20, + b_max: int | None = None, + maxiter: int = 50, print_iterations: bool = False, func_kwargs: dict[str, Any] | None = None, run_kwargs: dict[str, Any] | None = None, - **kwargs ) 
-> SearchResult: """Perform a keff search on a model parametrized by a single variable. + TODO: Describe GRsecant + Parameters ---------- func : ModelModifier @@ -2185,6 +2198,35 @@ def keff_search( Second guess for the parameter passed to `func` target : float, optional keff value to search for + k_tol : float, optional + Stopping criterion on the function value; the absolute value must + be within ``k_tol`` of zero to be accepted. + sigma_final : float, optional + Maximum accepted uncertainty in ``f(x)`` for the Eq. 8 stopping + criterion. + p : float, optional + Exponent used in the Eq. 8 stopping criterion. + sigma_factor : float, optional + Multiplicative factor used in the Eq. 8 stopping criterion. + memory : int, optional + Number of most-recent points used in the weighted linear fit of + ``f(x) = a + b x`` to predict the next point. + x_min : float, optional + Minimum allowed value for the parameter ``x``. + x_max : float, optional + Maximum allowed value for the parameter ``x``. + b0 : int, optional + Number of active batches to use in the first function evaluation. + If None, uses the model's current setting. + b1 : int, optional + Number of active batches to use in the second function evaluation. + If None, uses the model's current setting. + b_min : int, optional + Minimum number of active batches to use in a function evaluation. + b_max : int, optional + Maximum number of active batches to use in a function evaluation. + maxiter : int, optional + Maximum number of iterations to perform. print_iterations : bool, optional Whether or not to print the guess and the resultant keff during the iteration process. @@ -2192,33 +2234,36 @@ def keff_search( Keyword-based arguments to pass to the `func` function. run_kwargs : dict, optional Keyword arguments to pass to :meth:`openmc.Model.run`. - **kwargs - All remaining keyword arguments are passed to - :func:`grsecant`. 
Returns ------- - zero_value : float - Estimated value of the variable parameter where keff is the targeted - value - guesses : List of Real - List of guesses attempted by the search - results : List of 2-tuple of Real - List of keffs and uncertainties corresponding to the guess attempted - by the search + SearchResult + Result object containing the estimated root (parameter value) and + evaluation history (parameters, means, standard deviations, and + batches), plus convergence status and termination reason. """ check_type('model modifier', func, Callable) check_type('target', target, Real) + if memory < 2: + raise ValueError("memory must be ≥ 2") func_kwargs = {} if func_kwargs is None else dict(func_kwargs) run_kwargs = {} if run_kwargs is None else dict(run_kwargs) run_kwargs.setdefault('output', False) - # Define the function to be passed to grsecant - def f(guess: float, batches: int) -> float: + # Create lists to store the history of evaluations + xs: list[float] = [] + fs: list[float] = [] + ss: list[float] = [] + gs: list[int] = [] + count = 0 + + # Helper function to evaluate f and store results + # TODO: Support use of openmc.lib + def eval_at(x: float, batches: int) -> tuple[float, float]: # Modify the model with the current guess - func(guess, **func_kwargs) + func(x, **func_kwargs) # Change the number of batches self.settings.batches = self.settings.inactive + batches @@ -2229,21 +2274,91 @@ def f(guess: float, batches: int) -> float: keff = sp.keff if print_iterations: - print(f'{batches=}, {guess=:.2e}, {keff=:.5f}') + nonlocal count + count += 1 + print(f'Iteration {count}: {batches=}, {x=:.2e}, {keff=:.5f}') + + xs.append(float(x)) + fs.append(float(keff.n - target)) + ss.append(float(keff.s)) + gs.append(int(batches)) + return fs[-1], ss[-1] + + # Default b0/b1 to current model settings if not explicitly provided + if b0 is None: + b0 = self.settings.batches - self.settings.inactive + if b1 is None: + b1 = self.settings.batches - 
self.settings.inactive + + # Perform the search (inlined GRsecant) in a temporary directory + with TemporaryDirectory() as tmpdir: + run_kwargs.setdefault('cwd', tmpdir) - return keff.n - target, keff.s + # ---- Seed with two evaluations + f0, s0 = eval_at(x0, b0) + if abs(f0) <= k_tol and s0 <= sigma_final: + return SearchResult(x0, xs, fs, ss, gs, True, "converged") + f1, s1 = eval_at(x1, b1) + if abs(f1) <= k_tol and s1 <= sigma_final: + return SearchResult(x1, xs, fs, ss, gs, True, "converged") + + for _ in range(maxiter): + # ------ Step 1: propose next x via GRsecant + m = min(memory, len(xs)) + + # Perform a curve fit on f(x) = a + bx accounting for + # uncertainties. This is equivalent to minimizing the function + # in Equation (A.14) + (a, b), _ = curve_fit( + lambda x, a, b: a + b*x, + xs[-m:], fs[-m:], sigma=ss[-m:], absolute_sigma=True + ) + x_new = float(-a / b) + + # Clamp x_new to the bounds if provided + if x_min is not None: + x_new = max(x_new, x_min) + if x_max is not None: + x_new = min(x_new, x_max) + + # ------ Step 2: choose target σ for next run (Eq. 8 + clamp) + + min_abs_f = float(np.min(np.abs(fs))) + base = sigma_factor * sigma_final + ratio = min_abs_f / k_tol if k_tol > 0 else 1.0 + sig = base * (ratio ** p) + sig_target = max(sig, base) + + # ------ Step 3: choose generations to hit σ_target (Appendix C) + + # Use at least two past points for regression + if len(gs) >= 2 and np.var(np.log(gs)) > 0.0: + # Perform a curve fit based on Eq. (C.3) to solve for ln(k). + # Note that unlike in the paper, we do not leave r as an + # undetermined parameter and choose r=0.5. 
+ (ln_k,), _ = curve_fit( + lambda ln_b, ln_k: ln_k - 0.5*ln_b, + np.log(gs[-4:]), np.log(ss[-4:]), + ) + k = float(np.exp(ln_k)) + else: + k = float(ss[-1] * math.sqrt(gs[-1])) - # Set arguments to be passed to grsecant - kwargs['f'] = f - kwargs['x0'] = x0 - kwargs['x1'] = x1 - kwargs.setdefault('b0', self.settings.batches - self.settings.inactive) - kwargs.setdefault('b1', self.settings.batches - self.settings.inactive) + b_new = (k / sig_target) ** 2 - # Perform the search in a temporary directory - with TemporaryDirectory() as tmpdir: - run_kwargs.setdefault('cwd', tmpdir) - return grsecant(**kwargs) + # Clamp and round up to integer + b_new = max(b_min, math.ceil(b_new)) + if b_max is not None: + b_new = min(b_new, b_max) + + # Evaluate at proposed x with batches determined above + f_new, s_new = eval_at(x_new, b_new) + + # Termination based on both criteria (|f| and σ) + if abs(f_new) <= k_tol and s_new <= sigma_final: + return SearchResult(x_new, xs, fs, ss, gs, True, "converged") + + return SearchResult(xs[-1], xs, fs, ss, gs, False, "maxiter") @dataclass @@ -2286,150 +2401,3 @@ def total_batches(self) -> int: return sum(self.batches) - -# TODO: Allow for passing a single x0 value? -# TODO: Fit on sigma vs b is done with last 4 points. Change this? -# TODO: Support use of openmc.lib - -def grsecant( - f, - x0: float, - x1: float, - x_min: float | None = None, - x_max: float | None = None, - b0: int = 200, - b1: int = 200, - b_min: int = 20, - b_max: int | None = None, - memory: int = 4, - k_tol: float = 1e-4, - sigma_final: float = 3e-4, - p: float = 0.5, - sigma_factor: float = 0.95, - maxiter: int = 50, - **kwargs: Any -) -> SearchResult: - """GRsecant with MC-aware uncertainty/effort selection. - - Parameters - ---------- - f : function - Function that takes a parameter and returns the mean and standard - deviation. 
- x0 : float - First guess for the parameter passed to `f` - x1 : float - Second guess for the parameter passed to `f` - x_min : float - Minimum value for x. - x_max : float - Maximum value for x. - b0 : int - Number of batches to use in first function evaluation. - b1 : int - Number of batches to use in second function evaluation. - b_min : int - Minimum number of batches to use in a given function evaluation. - b_max : int - Maximum number of batches to use in a given function evaluation. - memory : int - Number of points to use in curve fit of f for predicting the next point. - k_tol : float - Stopping criterion on function value; value must be within k_tol of zero - to be accepted. - sigma_final : float - Maximum accepted uncertainty in f(x) for Eq. 8 stopping criterion. - p : float - Exponent in Eq. 8 stopping criteria. - sigma_factor : float - Multiplicative factor in Eq. 8 stopping criteria. - maxiter : int - Maximum number of iterations to perform. - **kwargs - Additional keyword arguments to pass to `f`. - - """ - if memory < 2: - raise ValueError("memory must be ≥ 2") - - # Create lists to store the history of evaluations - xs = [] - fs = [] - ss = [] - gs = [] - - # Helper function to evaluate f and store results - def eval_at(x: float, g: int): - mc_kwargs = dict(kwargs) - mc_kwargs["generations"] = int(g) - f_mean, f_std = f(x, **mc_kwargs) - xs.append(float(x)) - fs.append(float(f_mean)) - ss.append(float(f_std)) - gs.append(int(g)) - return fs[-1], ss[-1] - - # ---- Seed with two evaluations - f0, s0 = eval_at(x0, b0) - if abs(f0) <= k_tol and s0 <= sigma_final: - return SearchResult(x0, xs, fs, ss, gs, True, "converged") - f1, s1 = eval_at(x1, b1) - if abs(f1) <= k_tol and s1 <= sigma_final: - return SearchResult(x1, xs, fs, ss, gs, True, "converged") - - for it in range(maxiter): - # ---------- Step 1: propose next x via GRsecant - m = min(memory, len(xs)) - - # Perform a curve fit on f(x) = a + bx accounting for uncertainties. 
- # This is equivalent to minimizing the function in Equation (A.14) - (a, b), _ = curve_fit( - lambda x, a, b: a + b*x, - xs[-m:], fs[-m:], sigma=ss[-m:], absolute_sigma=True - ) - x_new = float(-a / b) - - # Clamp x_new to the bounds if provided - if x_min is not None: - x_new = max(x_new, x_min) - if x_max is not None: - x_new = min(x_new, x_max) - - # ---------- Step 2: choose target σ for next run (Eq. 8 + clamp) - - min_abs_f = float(np.min(np.abs(fs))) - base = sigma_factor * sigma_final - ratio = min_abs_f / k_tol if k_tol > 0 else 1.0 - sig = base * (ratio ** p) - sig_target = max(sig, base) - - # ---------- Step 3: choose generations to hit σ_target (Appendix C) - - # Use at least two past points for regression - if len(gs) >= 2 and np.var(np.log(gs)) > 0.0: - # Perform a curve fit based on Eq. (C.3) to solve for ln(k). Note - # that unlike in the paper, we do not leave r as an undetermined - # parameter and choose r=0.5. - (ln_k,), _ = curve_fit( - lambda ln_b, ln_k: ln_k - 0.5*ln_b, - np.log(gs[-4:]), np.log(ss[-4:]), - ) - k = float(np.exp(ln_k)) - else: - k = float(ss[-1] * math.sqrt(gs[-1])) - - b_new = (k / sig_target) ** 2 - - # Clamp and round up to integer - b_new = max(b_min, math.ceil(b_new)) - if b_max is not None: - b_new = min(b_new, b_max) - - # Evaluate at proposed x with chosen effort - f_new, s_new = eval_at(x_new, b_new) - - # Termination: both criteria (|f| and σ) as in the paper - if abs(f_new) <= k_tol and s_new <= sigma_final: - return SearchResult(x_new, xs, fs, ss, gs, True, "converged") - - return SearchResult(xs[-1], xs, fs, ss, gs, False, "maxiter") From 00729922bf8bb3dba9603319a7d4d3a6cc167631 Mon Sep 17 00:00:00 2001 From: Paul Romano Date: Wed, 10 Sep 2025 22:59:37 -0500 Subject: [PATCH 07/14] Add description of GRsecant in docstring --- openmc/model/model.py | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/openmc/model/model.py b/openmc/model/model.py index 159555e2d73..d46aaf0bc9b 100644 
--- a/openmc/model/model.py +++ b/openmc/model/model.py @@ -2185,7 +2185,13 @@ def keff_search( ) -> SearchResult: """Perform a keff search on a model parametrized by a single variable. - TODO: Describe GRsecant + This method uses the GRsecant method described in a paper by `Price and + Roskoff `_. The GRsecant + method is a modification of the secant method that accounts for + uncertainties in the function evaluations. The method uses a weighted + linear fit of the most recent function evaluations to predict the next + point to evaluate. It also adaptively changes the number of batches to + meet the target uncertainty value at each iteration. Parameters ---------- @@ -2199,8 +2205,8 @@ def keff_search( target : float, optional keff value to search for k_tol : float, optional - Stopping criterion on the function value; the absolute value must - be within ``k_tol`` of zero to be accepted. + Stopping criterion on the function value; the absolute value must be + within ``k_tol`` of zero to be accepted. sigma_final : float, optional Maximum accepted uncertainty in ``f(x)`` for the Eq. 8 stopping criterion. @@ -2216,8 +2222,8 @@ def keff_search( x_max : float, optional Maximum allowed value for the parameter ``x``. b0 : int, optional - Number of active batches to use in the first function evaluation. - If None, uses the model's current setting. + Number of active batches to use in the first function evaluation. If + None, uses the model's current setting. b1 : int, optional Number of active batches to use in the second function evaluation. If None, uses the model's current setting. 
From a9ea0aa00bad87f45391857abaec5929afe8bf00 Mon Sep 17 00:00:00 2001 From: Paul Romano Date: Thu, 11 Sep 2025 09:30:24 -0500 Subject: [PATCH 08/14] Rename print_iterations --> output --- openmc/model/model.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/openmc/model/model.py b/openmc/model/model.py index d46aaf0bc9b..e3c41b4ad7b 100644 --- a/openmc/model/model.py +++ b/openmc/model/model.py @@ -2179,7 +2179,7 @@ def keff_search( b_min: int = 20, b_max: int | None = None, maxiter: int = 50, - print_iterations: bool = False, + output: bool = False, func_kwargs: dict[str, Any] | None = None, run_kwargs: dict[str, Any] | None = None, ) -> SearchResult: @@ -2233,9 +2233,8 @@ def keff_search( Maximum number of active batches to use in a function evaluation. maxiter : int, optional Maximum number of iterations to perform. - print_iterations : bool, optional - Whether or not to print the guess and the resultant keff during the - iteration process. + output : bool, optional + Whether or not to display output showing iteration progress. func_kwargs : dict, optional Keyword-based arguments to pass to the `func` function. 
run_kwargs : dict, optional @@ -2279,7 +2278,7 @@ def eval_at(x: float, batches: int) -> tuple[float, float]: with openmc.StatePoint(sp_filepath) as sp: keff = sp.keff - if print_iterations: + if output: nonlocal count count += 1 print(f'Iteration {count}: {batches=}, {x=:.2e}, {keff=:.5f}') From 9dce3dfcdc550ff16685a9bfe97eb52433e2c1fe Mon Sep 17 00:00:00 2001 From: Paul Romano Date: Mon, 15 Sep 2025 12:04:34 -0500 Subject: [PATCH 09/14] Add support for keff search with openmc.lib --- openmc/model/model.py | 26 ++++++++++++++++++-------- src/settings.cpp | 5 ----- 2 files changed, 18 insertions(+), 13 deletions(-) diff --git a/openmc/model/model.py b/openmc/model/model.py index e3c41b4ad7b..30e04ec74cf 100644 --- a/openmc/model/model.py +++ b/openmc/model/model.py @@ -2238,7 +2238,8 @@ def keff_search( func_kwargs : dict, optional Keyword-based arguments to pass to the `func` function. run_kwargs : dict, optional - Keyword arguments to pass to :meth:`openmc.Model.run`. + Keyword arguments to pass to :meth:`openmc.Model.run` or + :meth:`openmc.lib.run`. Returns ------- @@ -2248,6 +2249,7 @@ def keff_search( batches), plus convergence status and termination reason. 
""" + import openmc.lib check_type('model modifier', func, Callable) check_type('target', target, Real) @@ -2270,18 +2272,25 @@ def eval_at(x: float, batches: int) -> tuple[float, float]: # Modify the model with the current guess func(x, **func_kwargs) - # Change the number of batches - self.settings.batches = self.settings.inactive + batches + # Change the number of batches and run the model + batches = self.settings.inactive + batches + if openmc.lib.is_initialized: + openmc.lib.settings.set_batches(batches) + openmc.lib.reset() + openmc.lib.run(**run_kwargs) + sp_filepath = f'statepoint.{batches}.h5' + else: + self.settings.batches = batches + sp_filepath = self.run(**run_kwargs) - # Run the model and obtain keff - sp_filepath = self.run(**run_kwargs) + # Extract keff and its uncertainty with openmc.StatePoint(sp_filepath) as sp: keff = sp.keff if output: nonlocal count count += 1 - print(f'Iteration {count}: {batches=}, {x=:.2e}, {keff=:.5f}') + print(f'Iteration {count}: {batches=}, {x=:.6g}, {keff=:.5f}') xs.append(float(x)) fs.append(float(keff.n - target)) @@ -2297,7 +2306,8 @@ def eval_at(x: float, batches: int) -> tuple[float, float]: # Perform the search (inlined GRsecant) in a temporary directory with TemporaryDirectory() as tmpdir: - run_kwargs.setdefault('cwd', tmpdir) + if not openmc.lib.is_initialized: + run_kwargs.setdefault('cwd', tmpdir) # ---- Seed with two evaluations f0, s0 = eval_at(x0, b0) @@ -2307,7 +2317,7 @@ def eval_at(x: float, batches: int) -> tuple[float, float]: if abs(f1) <= k_tol and s1 <= sigma_final: return SearchResult(x1, xs, fs, ss, gs, True, "converged") - for _ in range(maxiter): + for _ in range(maxiter - 2): # ------ Step 1: propose next x via GRsecant m = min(memory, len(xs)) diff --git a/src/settings.cpp b/src/settings.cpp index afa42a3c5e6..8f05f8c2b90 100644 --- a/src/settings.cpp +++ b/src/settings.cpp @@ -1210,11 +1210,6 @@ extern "C" int openmc_set_n_batches( return OPENMC_E_INVALID_ARGUMENT; } - if 
(simulation::current_batch >= n_batches) { - set_errmsg("Number of batches must be greater than current batch."); - return OPENMC_E_INVALID_ARGUMENT; - } - if (!settings::trigger_on) { // Set n_batches and n_max_batches to same value settings::n_batches = n_batches; From 36fccf702f7f483eb528eb45156990169553e272 Mon Sep 17 00:00:00 2001 From: Paul Romano Date: Mon, 15 Sep 2025 12:05:56 -0500 Subject: [PATCH 10/14] Get rid of b1 argument --- openmc/model/model.py | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/openmc/model/model.py b/openmc/model/model.py index 30e04ec74cf..65d605e1b21 100644 --- a/openmc/model/model.py +++ b/openmc/model/model.py @@ -2175,7 +2175,6 @@ def keff_search( x_min: float | None = None, x_max: float | None = None, b0: int | None = None, - b1: int | None = None, b_min: int = 20, b_max: int | None = None, maxiter: int = 50, @@ -2222,11 +2221,8 @@ def keff_search( x_max : float, optional Maximum allowed value for the parameter ``x``. b0 : int, optional - Number of active batches to use in the first function evaluation. If - None, uses the model's current setting. - b1 : int, optional - Number of active batches to use in the second function evaluation. - If None, uses the model's current setting. + Number of active batches to use for the initial function + evaluations. If None, uses the model's current setting. b_min : int, optional Minimum number of active batches to use in a function evaluation. 
b_max : int, optional @@ -2298,11 +2294,9 @@ def eval_at(x: float, batches: int) -> tuple[float, float]: gs.append(int(batches)) return fs[-1], ss[-1] - # Default b0/b1 to current model settings if not explicitly provided + # Default b0 to current model settings if not explicitly provided if b0 is None: b0 = self.settings.batches - self.settings.inactive - if b1 is None: - b1 = self.settings.batches - self.settings.inactive # Perform the search (inlined GRsecant) in a temporary directory with TemporaryDirectory() as tmpdir: @@ -2313,7 +2307,7 @@ def eval_at(x: float, batches: int) -> tuple[float, float]: f0, s0 = eval_at(x0, b0) if abs(f0) <= k_tol and s0 <= sigma_final: return SearchResult(x0, xs, fs, ss, gs, True, "converged") - f1, s1 = eval_at(x1, b1) + f1, s1 = eval_at(x1, b0) if abs(f1) <= k_tol and s1 <= sigma_final: return SearchResult(x1, xs, fs, ss, gs, True, "converged") From 2a15eb863177eaabf95789778020c3470934a525 Mon Sep 17 00:00:00 2001 From: Paul Romano Date: Mon, 15 Sep 2025 13:11:57 -0500 Subject: [PATCH 11/14] Add documentation on stopping criterion --- openmc/model/model.py | 27 ++++++++++++++++++--------- 1 file changed, 18 insertions(+), 9 deletions(-) diff --git a/openmc/model/model.py b/openmc/model/model.py index 65d605e1b21..4007e6455fd 100644 --- a/openmc/model/model.py +++ b/openmc/model/model.py @@ -2170,7 +2170,7 @@ def keff_search( k_tol: float = 1e-4, sigma_final: float = 3e-4, p: float = 0.5, - sigma_factor: float = 0.95, + q: float = 0.95, memory: int = 4, x_min: float | None = None, x_max: float | None = None, @@ -2182,7 +2182,7 @@ def keff_search( func_kwargs: dict[str, Any] | None = None, run_kwargs: dict[str, Any] | None = None, ) -> SearchResult: - """Perform a keff search on a model parametrized by a single variable. + r"""Perform a keff search on a model parametrized by a single variable. This method uses the GRsecant method described in a paper by `Price and Roskoff `_. 
The GRsecant
@@ -2192,6 +2192,17 @@ def keff_search(
         point to evaluate. It also adaptively changes the number of batches to
         meet the target uncertainty value at each iteration.
 
+        The target uncertainty for iteration :math:`n+1` is determined by the
+        following equation (following Eq. (8) in the paper):
+
+        .. math::
+            \sigma_{n+1} = q \sigma_\text{final} \left ( \frac{ \min \left \{
+            \left\lvert k_i - k_\text{target} \right\rvert : i=0,1,\dots,n
+            \right \} }{k_\text{tol}} \right )^p
+
+        where :math:`q` is a multiplicative factor less than 1, given as the
+        ``q`` parameter below.
+
         Parameters
         ----------
         func : ModelModifier
@@ -2207,12 +2218,11 @@ def keff_search(
             Stopping criterion on the function value; the absolute value must be
             within ``k_tol`` of zero to be accepted.
         sigma_final : float, optional
-            Maximum accepted uncertainty in ``f(x)`` for the Eq. 8 stopping
-            criterion.
+            Maximum accepted k-effective uncertainty for the stopping criterion.
         p : float, optional
-            Exponent used in the Eq. 8 stopping criterion.
-        sigma_factor : float, optional
-            Multiplicative factor used in the Eq. 8 stopping criterion.
+            Exponent used in the stopping criterion.
+        q : float, optional
+            Multiplicative factor used in the stopping criterion.
         memory : int, optional
             Number of most-recent points used in the weighted linear fit of
             ``f(x) = a + b x`` to predict the next point.
@@ -2263,7 +2273,6 @@ def keff_search(
         count = 0
 
         # Helper function to evaluate f and store results
-        # TODO: Support use of openmc.lib
         def eval_at(x: float, batches: int) -> tuple[float, float]:
             # Modify the model with the current guess
             func(x, **func_kwargs)
@@ -2333,7 +2342,7 @@ def eval_at(x: float, batches: int) -> tuple[float, float]:
             # ------ Step 2: choose target σ for next run (Eq.
8 + clamp) min_abs_f = float(np.min(np.abs(fs))) - base = sigma_factor * sigma_final + base = q * sigma_final ratio = min_abs_f / k_tol if k_tol > 0 else 1.0 sig = base * (ratio ** p) sig_target = max(sig, base) From 0d580ad10824c967c3067fec6c608d76983d1fc3 Mon Sep 17 00:00:00 2001 From: Paul Romano Date: Wed, 17 Sep 2025 16:16:00 -0500 Subject: [PATCH 12/14] Fix two failing tests --- tests/unit_tests/test_lib.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/tests/unit_tests/test_lib.py b/tests/unit_tests/test_lib.py index 8ab35335fc0..9afaaa92dd0 100644 --- a/tests/unit_tests/test_lib.py +++ b/tests/unit_tests/test_lib.py @@ -468,9 +468,6 @@ def test_set_n_batches(lib_run): for i in range(7): openmc.lib.next_batch() - # Setting n_batches less than current_batch should raise error - with pytest.raises(exc.InvalidArgumentError): - settings.set_batches(6) # n_batches should stay the same assert settings.get_batches() == 10 From 7948ba655d8ebb14688aadb4ef6337297aba7b70 Mon Sep 17 00:00:00 2001 From: Paul Romano Date: Mon, 13 Oct 2025 13:08:31 -0500 Subject: [PATCH 13/14] Incorporate suggestion from @church89 --- openmc/model/model.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/openmc/model/model.py b/openmc/model/model.py index d4334f4ab01..7963751d567 100644 --- a/openmc/model/model.py +++ b/openmc/model/model.py @@ -2323,7 +2323,7 @@ def eval_at(x: float, batches: int) -> tuple[float, float]: func(x, **func_kwargs) # Change the number of batches and run the model - batches = self.settings.inactive + batches + batches += self.settings.inactive if openmc.lib.is_initialized: openmc.lib.settings.set_batches(batches) openmc.lib.reset() From efcb7734105259942dd140e4f2953553377d4cfd Mon Sep 17 00:00:00 2001 From: Paul Romano Date: Mon, 13 Oct 2025 14:38:15 -0500 Subject: [PATCH 14/14] Add test for keff_search method --- tests/unit_tests/test_model.py | 57 ++++++++++++++++++++++++++++++++++ 1 file changed, 57 insertions(+) diff --git 
a/tests/unit_tests/test_model.py b/tests/unit_tests/test_model.py index 60f8b1a25a3..f4f94a47cac 100644 --- a/tests/unit_tests/test_model.py +++ b/tests/unit_tests/test_model.py @@ -901,6 +901,7 @@ def test_id_map_aligned_model(): assert tr_instance == 3, f"Expected cell instance 3 at top-right corner, got {tr_instance}" assert tr_material == 5, f"Expected material ID 5 at top-right corner, got {tr_material}" + def test_setter_from_list(): mat = openmc.Material() model = openmc.Model(materials=[mat]) @@ -913,3 +914,59 @@ def test_setter_from_list(): plot = openmc.Plot() model = openmc.Model(plots=[plot]) assert isinstance(model.plots, openmc.Plots) + + +def test_keff_search(run_in_tmpdir): + """Test the Model.keff_search method""" + + # Create model of a sphere of U235 + mat = openmc.Material() + mat.set_density('g/cm3', 18.9) + mat.add_nuclide('U235', 1.0) + sphere = openmc.Sphere(r=10.0, boundary_type='vacuum') + cell = openmc.Cell(fill=mat, region=-sphere) + geometry = openmc.Geometry([cell]) + settings = openmc.Settings(particles=1000, inactive=10, batches=30) + model = openmc.Model(geometry=geometry, settings=settings) + + # Define function to modify sphere radius + def modify_radius(radius): + sphere.r = radius + + # Perform keff search + k_tol = 4e-3 + sigma_final = 2e-3 + result = model.keff_search( + func=modify_radius, + x0=6.0, + x1=9.0, + k_tol=k_tol, + sigma_final=sigma_final, + output=True, + ) + + final_keff = result.means[-1] + 1.0 # Add back target since means are (keff - target) + final_sigma = result.stdevs[-1] + + # Check for convergence and that tolerances are met + assert result.converged, "keff_search did not converge" + assert abs(final_keff - 1.0) <= k_tol, \ + f"Final keff {final_keff:.5f} not within k_tol {k_tol}" + assert final_sigma <= sigma_final, \ + f"Final uncertainty {final_sigma:.5f} exceeds sigma_final {sigma_final}" + + # Check type of result + assert isinstance(result, openmc.model.SearchResult) + + # Check that we have 
function evaluation history + assert len(result.parameters) >= 2 + assert len(result.means) == len(result.parameters) + assert len(result.stdevs) == len(result.parameters) + assert len(result.batches) == len(result.parameters) + + # Check that function_calls property works + assert result.function_calls == len(result.parameters) + + # Check that total_batches property works + assert result.total_batches == sum(result.batches) + assert result.total_batches > 0