diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index d2e24ec..a315f31 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -53,7 +53,7 @@ jobs: python-version: ${{ matrix.python-version }} - name: "Main Script" run: | - EXTRA_INSTALL="numpy" + EXTRA_INSTALL="numpy scipy" sudo apt update sudo apt install gfortran-7 liblapack-dev libblas-dev sudo ln -sf /usr/bin/gfortran-7 /usr/bin/gfortran diff --git a/leap/__init__.py b/leap/__init__.py index 8d4c75f..7cb2045 100644 --- a/leap/__init__.py +++ b/leap/__init__.py @@ -88,14 +88,15 @@ def implicit_expression(self, expression_tag=None): # }}} -# {{{ two-order adaptivity +# {{{ adaptivity -class TwoOrderAdaptiveMethodBuilderMixin(MethodBuilder): +class AdaptiveMethodBuilderMixin(MethodBuilder): """ This class expected the following members to be defined: state, t, dt. """ - def __init__(self, atol=0, rtol=0, max_dt_growth=None, min_dt_shrinkage=None): + def __init__(self, atol=0, rtol=0, max_dt_growth=None, min_dt_shrinkage=None, + single_order=False): self.adaptive = bool(atol or rtol) self.atol = atol self.rtol = rtol @@ -109,6 +110,12 @@ def __init__(self, atol=0, rtol=0, max_dt_growth=None, min_dt_shrinkage=None): self.max_dt_growth = max_dt_growth self.min_dt_shrinkage = min_dt_shrinkage + self.single_order = single_order + + # Error constants for Adams methods + self.c_exp = [1/2, 5/12, 3/8, 251/720] + self.c_imp = [-1/2, -1/12, -1/24, -19/720] + def finish_nonadaptive(self, cb, high_order_estimate, low_order_estimate): raise NotImplementedError() @@ -160,6 +167,70 @@ def norm(expr): Min((0.9 * self.dt * rel_error ** (-1 / self.high_order), self.max_dt_growth * self.dt))) + def finish_nonadaptive_hist(self, cb, high_order_estimate, low_order_estimate, + hist, time_hist): + raise NotImplementedError() + + def finish_adaptive_hist(self, cb, high_order_estimate, low_order_estimate, + hist, time_hist): + from pymbolic import var + from pymbolic.primitives import Comparison, LogicalOr, Max, Min + from dagrt.expression import IfThenElse + + norm_start_state = var("norm_start_state") + norm_end_state = var("norm_end_state") + rel_error_raw = var("rel_error_raw") + rel_error = var("rel_error") + + def norm(expr): + return var("norm_2")(expr) + + cb(norm_start_state, norm(self.state)) + cb(norm_end_state, norm(low_order_estimate)) + if self.single_order: + cb(rel_error_raw, abs(self.c_imp[self.low_order-1]) + * norm(high_order_estimate - low_order_estimate) + / (abs(self.c_exp[self.low_order-1] + - self.c_imp[self.low_order-1]) + * ((self.atol + self.rtol + * Max((norm_start_state, norm_end_state)))) + ) + ) + else: + cb(rel_error_raw, norm(high_order_estimate - low_order_estimate) + / (var("len")(self.state) ** 0.5 + * ( + self.atol + self.rtol + * Max((norm_start_state, norm_end_state)) + ))) + + cb(rel_error, IfThenElse(Comparison(rel_error_raw, "==", 0), + 1.0e-14, rel_error_raw)) + + with cb.if_(LogicalOr((Comparison(rel_error, ">", 1), + var("isnan")(rel_error)))): + + with cb.if_(var("isnan")(rel_error)): + cb(self.dt, self.min_dt_shrinkage * self.dt) + with cb.else_(): + cb(self.dt, Max((0.9 * self.dt + * rel_error ** (-1 / self.low_order), + self.min_dt_shrinkage * self.dt))) + + with cb.if_(self.t + self.dt, "==", self.t): + cb.raise_(TimeStepUnderflow) + with cb.else_(): + cb.fail_step() + + with cb.else_(): + # This updates :
should not be set before this is called. + self.finish_nonadaptive_hist(cb, high_order_estimate, low_order_estimate, + hist, time_hist) + + cb(self.dt, + Min((0.9 * self.dt * rel_error ** (-1 / self.high_order), + self.max_dt_growth * self.dt))) + # }}} diff --git a/leap/implicit.py b/leap/implicit.py index bc9d008..ccebeae 100644 --- a/leap/implicit.py +++ b/leap/implicit.py @@ -86,3 +86,28 @@ def solver_hook(expr, var, id, **kwargs): new_phases[phase_name] = phase.copy(statements=new_statements) return dag.copy(phases=new_phases) + + +def generate_solve(cb, unknowns, equations, var_to_unknown, guess): + + # got a square system, let's solve + assignees = [unk.name for unk in unknowns] + + from pymbolic import substitute + subst_dict = { + rhs_var.name: var_to_unknown[rhs_var] + for rhs_var in unknowns} + + cb.assign_implicit( + assignees=assignees, + solve_components=[ + var_to_unknown[unk].name + for unk in unknowns], + expressions=[ + substitute(eq, subst_dict) + for eq in equations], + + # TODO: Could supply a starting guess + other_params={ + "guess": guess}, + solver_id="solve") diff --git a/leap/multistep/__init__.py b/leap/multistep/__init__.py index 660dbbc..3f1cc73 100644 --- a/leap/multistep/__init__.py +++ b/leap/multistep/__init__.py @@ -4,7 +4,7 @@ __copyright__ = """ Copyright (C) 2007 Andreas Kloeckner Copyright (C) 2014, 2015 Matt Wala -Copyright (C) 2015 Cory Mikida +Copyright (C) 2015, 2020 Cory Mikida """ __license__ = """ @@ -29,14 +29,17 @@ import numpy as np import numpy.linalg as la -from leap import MethodBuilder +from leap import MethodBuilder, AdaptiveMethodBuilderMixin from pymbolic import var __doc__ = """ .. autoclass:: AdamsIntegrationFunctionFamily .. autoclass:: AdamsMonomialIntegrationFunctionFamily +.. autoclass:: AdamsMethodBuilder .. autoclass:: AdamsBashforthMethodBuilder +.. autoclass:: AdamsMoultonMethodBuilder +.. autoclass:: EmbeddedAdamsMethodBuilder """ @@ -206,9 +209,10 @@ def emit_adams_extrapolation(cb, name_gen, # }}} -# {{{ ab method +# {{{ adams method + -class AdamsBashforthMethodBuilder(MethodBuilder): +class AdamsMethodBuilder(MethodBuilder): """ User-supplied context: + component_id: The value that is integrated @@ -265,6 +269,7 @@ def __init__(self, component_id, function_family=None, state_filter_name=None, self.t = var("") self.dt = var("
") + self.state_filter_name = state_filter_name if state_filter_name is not None: self.state_filter = var("" + state_filter_name) else: @@ -274,62 +279,16 @@ def generate(self): """ :returns: :class:`dagrt.language.DAGCode` """ - from pytools import UniqueNameGenerator - name_gen = UniqueNameGenerator() from dagrt.language import DAGCode, CodeBuilder - array = var("array") - rhs_var = var("rhs_var") - # Initialization with CodeBuilder(name="initialization") as cb_init: cb_init(self.step, 1) # Primary with CodeBuilder(name="primary") as cb_primary: - - if not self.static_dt: - time_history_data = self.time_history + [self.t] - time_hist_var = var(name_gen("time_history")) - cb_primary(time_hist_var, array(self.hist_length)) - for i in range(self.hist_length): - cb_primary(time_hist_var[i], time_history_data[i] - self.t) - - time_hist = time_hist_var - t_end = self.dt - dt_factor = 1 - - else: - time_hist = list(range(-self.hist_length+1, 0+1)) # noqa pylint:disable=invalid-unary-operand-type - dt_factor = self.dt - t_end = 1 - - cb_primary(rhs_var, self.eval_rhs(self.t, self.state)) - history = self.history + [rhs_var] - - ab_sum = emit_adams_integration( - cb_primary, name_gen, - self.function_family, - time_hist, history, - 0, t_end) - - state_est = self.state + dt_factor * ab_sum - if self.state_filter is not None: - state_est = self.state_filter(state_est) - cb_primary(self.state, state_est) - - # Rotate history and time history. - for i in range(self.hist_length - 1): - cb_primary(self.history[i], history[i + 1]) - - if not self.static_dt: - cb_primary(self.time_history[i], time_history_data[i + 1]) - - cb_primary(self.t, self.t + self.dt) - cb_primary.yield_state(expression=self.state, - component_id=self.component_id, - time_id="", time=self.t) + self.generate_primary(cb_primary) if self.hist_length == 1: # The first order method requires no bootstrapping. @@ -348,7 +307,8 @@ def generate(self): component_id=self.component_id, time_id="", time=self.t) cb_bootstrap(self.step, self.step + 1) - with cb_bootstrap.if_(self.step, "==", self.hist_length): + bootstrap_length = self.hist_length + with cb_bootstrap.if_(self.step, "==", bootstrap_length): cb_bootstrap.switch_phase("primary") return DAGCode( @@ -367,6 +327,85 @@ def eval_rhs(self, t, y): parameters=(), kw_parameters={"t": t, self.component_id: y}) + def rotate_and_yield(self, cb, hist, time_hist): + for i in range(self.hist_length - 1): + cb(self.history[i], hist[i + 1]) + + if not self.static_dt: + cb(self.time_history[i], time_hist[i + 1]) + + cb(self.t, self.t + self.dt) + cb.yield_state(expression=self.state, + component_id=self.component_id, + time_id="", time=self.t) + + def set_up_time_data(self, cb, new_t): + from pytools import UniqueNameGenerator + name_gen = UniqueNameGenerator() + array = var("array") + if not self.static_dt: + time_data = self.time_history + [new_t] + time_data_var = var(name_gen("time_data")) + cb(time_data_var, array(self.hist_length)) + for i in range(self.hist_length): + cb(time_data_var[i], time_data[i] - self.t) + + relv_times = time_data_var + t_end = self.dt + dt_factor = 1 + + else: + if new_t == self.t: + relv_times = list(range(-self.hist_length+1, 0+1)) # noqa pylint:disable=invalid-unary-operand-type + time_data = list(range(-self.hist_length+1, 0+1)) # noqa pylint:disable=invalid-unary-operand-type + else: + # In implicit mode, the vector of times + # passed to adams_integration must + # include the *next* point in time. 
+ relv_times = list(range(-self.hist_length+2, 0+2)) # noqa pylint:disable=invalid-unary-operand-type + time_data = list(range(-self.hist_length+2, 0+2)) # noqa pylint:disable=invalid-unary-operand-type + dt_factor = self.dt + t_end = 1 + + return time_data, relv_times, dt_factor, t_end + + def generate_primary(self, cb): + raise NotImplementedError() + + def rk_bootstrap(self, cb): + raise NotImplementedError() + +# }}} + + +# {{{ ab method + +class AdamsBashforthMethodBuilder(AdamsMethodBuilder): + def generate_primary(self, cb): + rhs_var = var("rhs_var") + from pytools import UniqueNameGenerator + name_gen = UniqueNameGenerator() + + time_history_data, time_hist, \ + dt_factor, t_end = self.set_up_time_data(cb, self.t) + + cb(rhs_var, self.eval_rhs(self.t, self.state)) + history = self.history + [rhs_var] + + ab_sum = emit_adams_integration( + cb, name_gen, + self.function_family, + time_hist, history, + 0, t_end) + + state_est = self.state + dt_factor * ab_sum + if self.state_filter is not None: + state_est = self.state_filter(state_est) + cb(self.state, state_est) + + # Rotate history and time history. + self.rotate_and_yield(cb, history, time_history_data) + def rk_bootstrap(self, cb): """Initialize the timestepper with an RK method.""" @@ -385,35 +424,432 @@ def rk_bootstrap(self, cb): from leap.rk import ORDER_TO_RK_METHOD_BUILDER rk_method = ORDER_TO_RK_METHOD_BUILDER[self.function_family.order] - rk_tableau = tuple(zip(rk_method.c, rk_method.a_explicit)) rk_coeffs = rk_method.output_coeffs + stage_coeff_set_names = ("explicit",) + stage_coeff_sets = {"explicit": rk_method.a_explicit} + estimate_coeff_set_names = ("main",) + estimate_coeff_sets = {"main": rk_coeffs} + rhs_funcs = {"explicit": var(""+self.component_id)} + + # Traverse RK stage loop of appropriate order and update state. + rk = rk_method(self.component_id, self.state_filter_name) + cb = rk.generate_butcher_init(cb, stage_coeff_set_names, + stage_coeff_sets, rhs_funcs, + estimate_coeff_set_names, + estimate_coeff_sets) + cb, rhss, est_vars = rk.generate_butcher_primary(cb, stage_coeff_set_names, + stage_coeff_sets, rhs_funcs, + estimate_coeff_set_names, + estimate_coeff_sets) - # Stage loop (taken from EmbeddedButcherTableauMethodBuilder) - rhss = [var("rk_rhs_" + str(i)) for i in range(len(rk_tableau))] - for stage_num, (c, coeffs) in enumerate(rk_tableau): - if len(coeffs) == 0: - assert c == 0 - cb(rhss[stage_num], rhs_var) - else: - stage = self.state + sum(self.dt * coeff * rhss[j] - for (j, coeff) - in enumerate(coeffs)) + # Assign the value of the new state. + cb(self.state, est_vars[0]) - if self.state_filter is not None: - stage = self.state_filter(stage) +# }}} + + +# {{{ am method - cb(rhss[stage_num], self.eval_rhs(self.t + c * self.dt, stage)) - # Merge the values of the RHSs. - rk_comb = sum(coeff * rhss[j] for j, coeff in enumerate(rk_coeffs)) +class AdamsMoultonMethodBuilder(AdamsMethodBuilder): + def generate_primary(self, cb): + + from pytools import UniqueNameGenerator + name_gen = UniqueNameGenerator() + rhs_next_var = var("rhs_next_var") + rhs_var_to_unknown = {} + unkvar = cb.fresh_var("unk") + rhs_var_to_unknown[rhs_next_var] = unkvar + + # In implicit mode, the vector of times + # passed to adams_integration must + # include the *next* point in time. + time_data, relv_times, \ + dt_factor, t_end = self.set_up_time_data(cb, self.t + self.dt) + + # Implicit setup - rhs_next_var is an unknown, needs implicit solve. 
+ equations = [] + unknowns = set() + knowns = set() + + unknowns.add(rhs_next_var) + + # Create RHS vector for Adams setup, + # including RHS value to be implicitly + # solved for + rhss = self.history + [rhs_next_var] + + # Set up the actual Adams-Moulton step. + am_sum = emit_adams_integration( + cb, name_gen, + self.function_family, + relv_times, rhss, + 0, t_end) + + state_est = self.state + dt_factor * am_sum + + # Build the implicit solve expression. + from dagrt.expression import collapse_constants + from pymbolic.mapper.distributor import DistributeMapper as DistMap + solve_expression = collapse_constants( + rhs_next_var - self.eval_rhs(self.t + self.dt, + DistMap()(state_est)), + list(unknowns) + [self.state], + cb.assign, cb.fresh_var) + equations.append(solve_expression) + + # {{{ emit solve if possible + + if unknowns and len(unknowns) == len(equations): + from leap.implicit import generate_solve + generate_solve(cb, unknowns, equations, rhs_var_to_unknown, self.state) + elif not unknowns: + raise ValueError("Adams-Moulton implicit timestep has no unknowns") + elif len(unknowns) > len(equations): + raise ValueError("Adams-Moulton implicit timestep has more unknowns " + "than equations") + elif len(unknowns) < len(equations): + raise ValueError("Adams-Moulton implicit timestep has more equations " + "than unknowns") + + del equations[:] + knowns.update(unknowns) + unknowns.clear() + + # }}} - state_est = self.state + self.dt * rk_comb + # Update the state now that we've solved. if self.state_filter is not None: state_est = self.state_filter(state_est) + cb(self.state, state_est) + + # Add new RHS and time to history and rotate. + history = self.history + [rhs_next_var] + self.rotate_and_yield(cb, history, time_data) + + def rk_bootstrap(self, cb): + """Initialize the timestepper with an IMPLICIT RK method.""" + + from leap.rk import IMPLICIT_ORDER_TO_RK_METHOD_BUILDER + rk_method = IMPLICIT_ORDER_TO_RK_METHOD_BUILDER[self.function_family.order] + rk_coeffs = rk_method.output_coeffs + stage_coeff_set_names = ("implicit",) + stage_coeff_sets = {"implicit": rk_method.a_implicit} + estimate_coeff_set_names = ("main",) + estimate_coeff_sets = {"main": rk_coeffs} + rhs_funcs = {"implicit": var(""+self.component_id)} + + # Traverse RK stage loop of appropriate order and update state. + rk = rk_method(self.component_id, self.state_filter_name) + cb = rk.generate_butcher_init(cb, stage_coeff_set_names, + stage_coeff_sets, rhs_funcs, + estimate_coeff_set_names, + estimate_coeff_sets) + cb, rhss, est_vars = rk.generate_butcher_primary(cb, stage_coeff_set_names, + stage_coeff_sets, rhs_funcs, + estimate_coeff_set_names, + estimate_coeff_sets) # Assign the value of the new state. 
- cb(self.state, state_est) + cb(self.state, est_vars[0]) + + # Save the "next" RHS to the AM history + rhs_next_var = var("rhs_next_var") + + cb(rhs_next_var, self.eval_rhs(self.t + self.dt, self.state)) + + for i in range(len(self.history)): + + with cb.if_(self.step, "==", i + 1): + cb(self.history[i], rhs_next_var) + + if not self.static_dt: + cb(self.time_history[i], self.t + self.dt) + +# }}} + + +# {{{ embedded method w/adaptivity + + +class EmbeddedAdamsMethodBuilder( + AdamsMethodBuilder, AdaptiveMethodBuilderMixin): + """ + User-supplied context: + + component_id: The value that is integrated + + component_id: The right hand side function + """ + + def __init__(self, component_id, function_family=None, state_filter_name=None, + hist_length=None, static_dt=False, order=None, _extra_bootstrap=False, + use_high_order=False, atol=0, rtol=0, max_dt_growth=None, + min_dt_shrinkage=None, implicit=False): + """ + :arg function_family: Accepts an instance of + :class:`AdamsIntegrationFunctionFamily` + or an integer, in which case the classical monomial function family + with the order given by the integer is used. + :arg static_dt: If *True*, changing the timestep during time integration + is not allowed. + """ + + self.implicit = implicit + if function_family is not None and order is not None: + raise ValueError("may not specify both function_family and order") + + function_families = [] + if function_family is None: + function_families.append(order) + self.low_order = order + self.high_order = order + 1 + if not implicit: + function_families.append(order + 1) + del order + else: + function_families.append(order) + if not implicit: + function_families.append(order + 1) + + self.function_families = [] + self.function_families.append( + AdamsMonomialIntegrationFunctionFamily(function_families[0])) + if not implicit: + self.function_families.append( + AdamsMonomialIntegrationFunctionFamily(function_families[1])) + + if hist_length is None: + hist_length = function_families[0] + 1 + + # Check for reasonable history length. + if hist_length < function_families[0] + 1: + raise ValueError("Invalid history length specified for embedded Adams") + + self.hist_length = hist_length + + # If adaptivity is on, we can't have a static timestep. + if atol or rtol: + if static_dt is True: + raise ValueError("Can't have static timestepping with adaptivity") + + self.static_dt = static_dt + self.extra_bootstrap = _extra_bootstrap + + self.component_id = component_id + + # Declare variables + self.step = var("
<p>step") + self.function = var("<func>" + component_id) + self.history = \ + [var("<p>
f_n_minus_" + str(i)) for i in range(hist_length - 1, 0, -1)] + + if not self.static_dt: + self.time_history = [ + var("
<p>t_n_minus_" + str(i)) + for i in range(hist_length - 1, 0, -1)] + + self.state = var("<state>" + component_id) + self.t = var("<t>") + self.dt = var("<dt>
") + + self.state_filter_name = state_filter_name + if state_filter_name is not None: + self.state_filter = var("" + state_filter_name) + else: + self.state_filter = None + + if implicit: + single_order = True + else: + single_order = False + + AdaptiveMethodBuilderMixin.__init__( + self, + atol=atol, + rtol=rtol, + max_dt_growth=max_dt_growth, + min_dt_shrinkage=min_dt_shrinkage, + single_order=single_order) + + self.use_high_order = use_high_order + + def generate_primary(self, cb): + + from pytools import UniqueNameGenerator + name_gen = UniqueNameGenerator() + array = var("array") + rhs_var = var("rhs_var") + rhs_next_var = var("rhs_next_var") + rhs_var_to_unknown = {} + unkvar = cb.fresh_var("unk") + rhs_var_to_unknown[rhs_next_var] = unkvar + equations = [] + unknowns = set() + knowns = set() + + # Update history + if self.implicit: + unknowns.add(rhs_next_var) + # In implicit mode, the time history must + # include the *next* point in time. + time_data, relv_times, \ + dt_factor, t_end = self.set_up_time_data(cb, self.t + self.dt) + rhss = self.history + [rhs_next_var] + else: + time_data, relv_times, \ + dt_factor, t_end = self.set_up_time_data(cb, self.t) + cb(rhs_var, self.eval_rhs(self.t, self.state)) + rhss = self.history + [rhs_var] + + # Create history to feed to AB. + relv_times_ab_var = var(name_gen("time_data_ab")) + cb(relv_times_ab_var, array(self.hist_length-1)) + if self.implicit: + rhss_ab = rhss[:-1] + for i in range(self.hist_length-1): + cb(relv_times_ab_var[i], relv_times[i]) + else: + rhss_ab = rhss[1:] + for i in range(self.hist_length-1): + cb(relv_times_ab_var[i], relv_times[i+1]) + + relv_times_ab = relv_times_ab_var + + # Set up the actual Adams-Bashforth and (if implicit) Adams-Moulton steps. + ab_sum = emit_adams_integration( + cb, name_gen, + self.function_families[0], + relv_times_ab, rhss_ab, + 0, t_end) + + if self.implicit: + # Create history to feed to AM. + rhss_am = rhss[1:] + relv_times_am_var = var(name_gen("time_data_am")) + cb(relv_times_am_var, array(self.hist_length-1)) + for i in range(self.hist_length-1): + cb(relv_times_am_var[i], relv_times[i+1]) + + relv_times_am = relv_times_am_var + + # Create AM estimate. + am_sum = emit_adams_integration( + cb, name_gen, + self.function_families[0], + relv_times_am, rhss_am, + 0, t_end) + else: + # Create higher-order AB estimate. + ab_sum_high = emit_adams_integration( + cb, name_gen, + self.function_families[1], + relv_times, rhss, + 0, t_end) + + state_est_low = self.state + dt_factor * ab_sum + if self.implicit: + state_est_high = self.state + dt_factor * am_sum + else: + state_est_high = self.state + dt_factor * ab_sum_high + + if self.implicit: + # Build the implicit solve expression. 
+ from dagrt.expression import collapse_constants + from pymbolic.mapper.distributor import DistributeMapper as DistMap + solve_expression = collapse_constants( + rhs_next_var - self.eval_rhs(self.t + self.dt, + DistMap()(state_est_high)), + list(unknowns) + [self.state], + cb.assign, cb.fresh_var) + equations.append(solve_expression) + + # {{{ emit solve if possible + + if unknowns and len(unknowns) == len(equations): + from leap.implicit import generate_solve + generate_solve(cb, unknowns, equations, + rhs_var_to_unknown, state_est_low) + elif not unknowns: + raise ValueError("Adaptive Adams implicit timestep has no unknowns") + elif len(unknowns) > len(equations): + raise ValueError("Adaptive Adams implicit timestep has more " + "unknowns than equations") + elif len(unknowns) < len(equations): + raise ValueError("Adaptive Adams implicit timestep has more " + "equations than unknowns") + + del equations[:] + knowns.update(unknowns) + unknowns.clear() + + # }}} + + # Update the state now that we've solved. + if self.state_filter is not None: + state_est_low = self.state_filter(state_est_low) + state_est_high = self.state_filter(state_est_high) + + # Finish needs to intervene here. + self.finish(cb, state_est_high, state_est_low, rhss, time_data) + + def finish(self, cb, high_est, low_est, rhss, time_data): + if not self.adaptive: + cb(self.state, low_est) + # Rotate history and time history. + self.rotate_and_yield(cb, rhss, time_data) + else: + self.finish_adaptive_hist(cb, high_est, low_est, rhss, time_data) + + def finish_nonadaptive_hist(self, cb, high_order_estimate, + low_order_estimate, rhss, time_data): + if self.use_high_order: + est = high_order_estimate + else: + est = low_order_estimate + + cb(self.state, est) + # Rotate history and time history. + self.rotate_and_yield(cb, rhss, time_data) + + def rk_bootstrap(self, cb): + """Initialize the timestepper with an RK method.""" + + rhs_var = var("rhs_var") + + cb(rhs_var, self.eval_rhs(self.t, self.state)) + + # Save the current RHS to the AB history + + for i in range(len(self.history)): + with cb.if_(self.step, "==", i + 1): + cb(self.history[i], rhs_var) + + if not self.static_dt: + cb(self.time_history[i], self.t) + + from leap.rk import ORDER_TO_RK_METHOD_BUILDER + rk_method = ORDER_TO_RK_METHOD_BUILDER[self.function_families[0].order] + rk_coeffs = rk_method.output_coeffs + stage_coeff_set_names = ("explicit",) + stage_coeff_sets = {"explicit": rk_method.a_explicit} + estimate_coeff_set_names = ("main",) + estimate_coeff_sets = {"main": rk_coeffs} + rhs_funcs = {"explicit": var(""+self.component_id)} + + # Traverse RK stage loop of appropriate order and update state. + rk = rk_method(self.component_id, self.state_filter_name) + cb = rk.generate_butcher_init(cb, stage_coeff_set_names, + stage_coeff_sets, rhs_funcs, + estimate_coeff_set_names, + estimate_coeff_sets) + cb, rhss, est_vars = rk.generate_butcher_primary(cb, stage_coeff_set_names, + stage_coeff_sets, rhs_funcs, + estimate_coeff_set_names, + estimate_coeff_sets) + + # Assign the value of the new state. 
+ cb(self.state, est_vars[0]) # }}} + # vim: fdm=marker diff --git a/leap/rk/__init__.py b/leap/rk/__init__.py index 9b5d254..3f70788 100644 --- a/leap/rk/__init__.py +++ b/leap/rk/__init__.py @@ -4,6 +4,7 @@ __copyright__ = """ Copyright (C) 2007-2013 Andreas Kloeckner Copyright (C) 2014, 2015 Matt Wala +Copyright (C) 2020 Cory Mikida """ __license__ = """ @@ -27,7 +28,7 @@ """ import numpy as np -from leap import MethodBuilder, TwoOrderAdaptiveMethodBuilderMixin +from leap import MethodBuilder, AdaptiveMethodBuilderMixin from dagrt.language import CodeBuilder, DAGCode from pymbolic import var @@ -39,8 +40,12 @@ A dictionary mapping desired order of accuracy to a corresponding RK method builder. +.. data:: IMPLICIT_ORDER_TO_RK_METHOD_BUILDER + + A dictionary mapping desired order of accuracy to a corresponding implicit + RK method builder. + .. autoclass:: ForwardEulerMethodBuilder -.. autoclass:: BackwardEulerMethodBuilder .. autoclass:: MidpointMethodBuilder .. autoclass:: HeunsMethodBuilder .. autoclass:: RK3MethodBuilder @@ -64,6 +69,15 @@ .. autoclass:: SSPRKMethodBuilder .. autoclass:: SSPRK22MethodBuilder .. autoclass:: SSPRK33MethodBuilder + +Implicit Methods +----------------------------------------- + +.. autoclass:: BackwardEulerMethodBuilder +.. autoclass:: DIRK2MethodBuilder +.. autoclass:: DIRK3MethodBuilder +.. autoclass:: DIRK4MethodBuilder +.. autoclass:: DIRK5MethodBuilder """ @@ -119,6 +133,10 @@ def c(self): def a_explicit(self): raise NotImplementedError + @property + def a_implicit(self): + raise NotImplementedError + @property def output_coeffs(self): raise NotImplementedError @@ -153,48 +171,85 @@ def generate_butcher(self, stage_coeff_set_names, stage_coeff_sets, rhs_funcs, :arg estimate_coeffs_sets: a mapping from estimate coefficient set names to cofficients. """ - - from pymbolic import var - comp = self.component_id - - dt = self.dt - t = self.t - state = self.state - - nstages = len(self.c) - # {{{ check coefficients for plausibility for name in stage_coeff_set_names: - for istage in range(nstages): + for istage in range(len(self.c)): coeff_sum = sum(stage_coeff_sets[name][istage]) assert abs(coeff_sum - self.c[istage]) < 1e-12, ( name, istage, coeff_sum, self.c[istage]) # }}} - # {{{ initialization + cb_init = CodeBuilder(name="initialization") + cb_init = self.generate_butcher_init(cb_init, stage_coeff_set_names, + stage_coeff_sets, rhs_funcs, + estimate_coeff_set_names, + estimate_coeff_sets) + cb_primary = CodeBuilder(name="primary") + cb_primary, stage_rhs_vars, estimate_vars, = self.generate_butcher_primary( + cb_primary, stage_coeff_set_names, + stage_coeff_sets, rhs_funcs, + estimate_coeff_set_names, + estimate_coeff_sets) + cb_primary = self.generate_butcher_finish(cb_primary, stage_coeff_set_names, + stage_coeff_sets, rhs_funcs, + estimate_coeff_set_names, + estimate_coeff_sets, + stage_rhs_vars, + estimate_vars) + return DAGCode( + phases={ + "initial": cb_init.as_execution_phase(next_phase="primary"), + "primary": cb_primary.as_execution_phase(next_phase="primary") + }, + initial_phase="initial") - last_rhss = {} + # {{{ initialization - with CodeBuilder(name="initialization") as cb: - for name in stage_coeff_set_names: - if ( - name in self.recycle_last_stage_coeff_set_names - and _is_first_stage_same_as_last_stage( - self.c, stage_coeff_sets[name])): - last_rhss[name] = var("
<p>
last_rhs_" + name) - cb(last_rhss[name], rhs_funcs[name](t=t, **{comp: state})) + def generate_butcher_init(self, cb, stage_coeff_set_names, + stage_coeff_sets, rhs_funcs, estimate_coeff_set_names, + estimate_coeff_sets): + self.last_rhss = {} + from pymbolic import var + for name in stage_coeff_set_names: + if ( + name in self.recycle_last_stage_coeff_set_names + and _is_first_stage_same_as_last_stage( + self.c, stage_coeff_sets[name])): + self.last_rhss[name] = var("
<p>
last_rhs_" + name) + cb(self.last_rhss[name], rhs_funcs[name]( + t=self.t, **{self.component_id: self.state})) cb_init = cb - # }}} + return cb_init + + # }}} + + # {{{ stage loop + + def generate_butcher_primary(self, cb, stage_coeff_set_names, + stage_coeff_sets, rhs_funcs, estimate_coeff_set_names, + estimate_coeff_sets): + + last_state_est_var = cb.fresh_var("last_state_est") + last_state_est_var_valid = False + + equations = [] + unknowns = set() + + comp = self.component_id + nstages = len(self.c) + dt = self.dt + t = self.t + last_rhss = self.last_rhss stage_rhs_vars = {} rhs_var_to_unknown = {} for name in stage_coeff_set_names: stage_rhs_vars[name] = [ - cb.fresh_var(f"rhs_{name}_s{i}") for i in range(nstages)] + cb.fresh_var(f"rhs_{name}_s{i}") for i in range(len(self.c))] # These are rhss if they are not yet known and pending an implicit solve. for i, rhsvar in enumerate(stage_rhs_vars[name]): @@ -203,166 +258,157 @@ def generate_butcher(self, stage_coeff_set_names, stage_coeff_sets, rhs_funcs, knowns = set() - # {{{ stage loop - - last_state_est_var = cb.fresh_var("last_state_est") - last_state_est_var_valid = False - - with CodeBuilder(name="primary") as cb: - equations = [] - unknowns = set() - - def make_known(v): - unknowns.discard(v) - knowns.add(v) - - for istage in range(nstages): - for name in stage_coeff_set_names: - c = self.c[istage] - my_rhs = stage_rhs_vars[name][istage] - - if ( - name in self.recycle_last_stage_coeff_set_names - and istage == 0 - and _is_first_stage_same_as_last_stage( - self.c, stage_coeff_sets[name])): - cb(my_rhs, last_rhss[name]) - make_known(my_rhs) + def make_known(v): + unknowns.discard(v) + knowns.add(v) - else: - is_implicit = False - - state_increment = 0 - for src_name in stage_coeff_set_names: - coeffs = stage_coeff_sets[src_name][istage] - for src_istage, coeff in enumerate(coeffs): - rhsval = stage_rhs_vars[src_name][src_istage] - if rhsval not in knowns: - unknowns.add(rhsval) - is_implicit = True - - state_increment += dt * coeff * rhsval - - state_est = state + state_increment - if (self.state_filter is not None - and not ( - # reusing last output state - c == 0 - and all( - len(stage_coeff_sets[src_name][istage]) == 0 - for src_name in stage_coeff_set_names))): - state_est = self.state_filter(state_est) - - if is_implicit: - rhs_expr = rhs_funcs[name]( - t=t + c*dt, **{comp: state_est}) - - from dagrt.expression import collapse_constants - solve_expression = collapse_constants( - my_rhs - rhs_expr, - list(unknowns) + [self.state], - cb.assign, cb.fresh_var) - equations.append(solve_expression) - - if istage + 1 == nstages: - last_state_est_var_valid = False - - else: - if istage + 1 == nstages: - cb(last_state_est_var, state_est) - state_est = last_state_est_var - last_state_est_var_valid = True - - rhs_expr = rhs_funcs[name]( - t=t + c*dt, **{comp: state_est}) - - cb(my_rhs, rhs_expr) - make_known(my_rhs) - - # {{{ emit solve if possible - - if unknowns and len(unknowns) == len(equations): - # got a square system, let's solve - assignees = [unk.name for unk in unknowns] - - from pymbolic import substitute - subst_dict = { - rhs_var.name: rhs_var_to_unknown[rhs_var] - for rhs_var in unknowns} - - cb.assign_implicit( - assignees=assignees, - solve_components=[ - rhs_var_to_unknown[unk].name - for unk in unknowns], - expressions=[ - substitute(eq, subst_dict) - for eq in equations], - - # TODO: Could supply a starting guess - other_params={ - "guess": state}, - solver_id="solve") - - del equations[:] - knowns.update(unknowns) - 
unknowns.clear() - - # }}} - - # Compute solution estimates. - estimate_vars = [ - cb.fresh_var("est_"+name) - for name in estimate_coeff_set_names] - - for iest, name in enumerate(estimate_coeff_set_names): - out_coeffs = estimate_coeff_sets[name] + for istage in range(len(self.c)): + for name in stage_coeff_set_names: + c = self.c[istage] + my_rhs = stage_rhs_vars[name][istage] if ( - last_state_est_var_valid - and # noqa: W504 - _is_last_stage_same_as_output(self.c, - stage_coeff_sets, out_coeffs)): - state_est = last_state_est_var + name in self.recycle_last_stage_coeff_set_names + and istage == 0 + and _is_first_stage_same_as_last_stage( + self.c, stage_coeff_sets[name])): + cb(my_rhs, last_rhss[name]) + make_known(my_rhs) else: + is_implicit = False + state_increment = 0 for src_name in stage_coeff_set_names: - state_increment += sum( - coeff * stage_rhs_vars[src_name][src_istage] - for src_istage, coeff in enumerate(out_coeffs)) + coeffs = stage_coeff_sets[src_name][istage] + for src_istage, coeff in enumerate(coeffs): + rhsval = stage_rhs_vars[src_name][src_istage] + if rhsval not in knowns: + unknowns.add(rhsval) + is_implicit = True + + state_increment += dt * coeff * rhsval + + state_est = self.state + state_increment + if (self.state_filter is not None + and not ( + # reusing last output state + c == 0 + and all( + len(stage_coeff_sets[src_name][istage]) == 0 + for src_name in stage_coeff_set_names))): + state_est = self.state_filter(state_est) - state_est = state + dt*state_increment + if is_implicit: + rhs_expr = rhs_funcs[name]( + t=t + c*dt, **{comp: state_est}) - if self.state_filter is not None: - state_est = self.state_filter(state_est) + from dagrt.expression import collapse_constants + solve_expression = collapse_constants( + my_rhs - rhs_expr, + list(unknowns) + [self.state], + cb.assign, cb.fresh_var) + equations.append(solve_expression) - cb( - estimate_vars[iest], - state_est) + if istage + 1 == nstages: + last_state_est_var_valid = False - # This updates . - self.finish(cb, estimate_coeff_set_names, estimate_vars) + else: + if istage + 1 == nstages: + cb(last_state_est_var, state_est) + state_est = last_state_est_var + last_state_est_var_valid = True - # These updates have to happen *after* finish because before we - # don't yet know whether finish will accept the new state. - for name in stage_coeff_set_names: - if ( - name in self.recycle_last_stage_coeff_set_names - and _is_first_stage_same_as_last_stage( - self.c, stage_coeff_sets[name])): - cb(last_rhss[name], stage_rhs_vars[name][-1]) + rhs_expr = rhs_funcs[name]( + t=t + c*dt, **{comp: state_est}) + + cb(my_rhs, rhs_expr) + make_known(my_rhs) + + # {{{ emit solve if we have any unknowns/equations + + if unknowns and len(unknowns) == len(equations): + from leap.implicit import generate_solve + generate_solve(cb, unknowns, equations, rhs_var_to_unknown, + self.state) + elif not unknowns: + # we have an explicit Runge-Kutta method + pass + elif len(unknowns) > len(equations): + raise ValueError("Runge-Kutta implicit timestep has more " + "unknowns than equations") + elif len(unknowns) < len(equations): + raise ValueError("Runge-Kutta implicit timestep has more " + "equations than unknowns") + + del equations[:] + knowns.update(unknowns) + unknowns.clear() + + # }}} + + # Compute solution estimates. 
+ estimate_vars = [ + cb.fresh_var("est_"+name) + for name in estimate_coeff_set_names] + + for iest, name in enumerate(estimate_coeff_set_names): + out_coeffs = estimate_coeff_sets[name] + + if ( + last_state_est_var_valid + and # noqa: W504 + _is_last_stage_same_as_output(self.c, + stage_coeff_sets, out_coeffs)): + state_est = last_state_est_var + + else: + state_increment = 0 + for src_name in stage_coeff_set_names: + state_increment += sum( + coeff * stage_rhs_vars[src_name][src_istage] + for src_istage, coeff in enumerate(out_coeffs)) + + state_est = self.state + dt*state_increment + + if self.state_filter is not None: + state_est = self.state_filter(state_est) + + cb( + estimate_vars[iest], + state_est) cb_primary = cb - # }}} + return cb_primary, stage_rhs_vars, estimate_vars - return DAGCode( - phases={ - "initial": cb_init.as_execution_phase(next_phase="primary"), - "primary": cb_primary.as_execution_phase(next_phase="primary") - }, - initial_phase="initial") + # }}} + + # {{{ update state and time + + def generate_butcher_finish(self, cb, stage_coeff_set_names, + stage_coeff_sets, rhs_funcs, estimate_coeff_set_names, + estimate_coeff_sets, stage_rhs_vars, estimate_vars): + + last_rhss = self.last_rhss + + # This updates . + self.finish(cb, estimate_coeff_set_names, estimate_vars) + + # These updates have to happen *after* finish because before we + # don't yet know whether finish will accept the new state. + for name in stage_coeff_set_names: + if ( + name in self.recycle_last_stage_coeff_set_names + and _is_first_stage_same_as_last_stage( + self.c, stage_coeff_sets[name])): + cb(last_rhss[name], stage_rhs_vars[name][-1]) + + cb_primary = cb + + return cb_primary + + # }}} def finish(self, cb, estimate_names, estimate_vars): cb(self.state, estimate_vars[0]) @@ -416,22 +462,6 @@ class ForwardEulerMethodBuilder(SimpleButcherTableauMethodBuilder): recycle_last_stage_coeff_set_names = () -class BackwardEulerMethodBuilder(SimpleButcherTableauMethodBuilder): - """ - .. automethod:: __init__ - .. automethod:: generate - """ - c = (0,) - - a_explicit = ( - (1,), - ) - - output_coeffs = (1,) - - recycle_last_stage_coeff_set_names = () - - class MidpointMethodBuilder(SimpleButcherTableauMethodBuilder): """ .. automethod:: __init__ @@ -539,14 +569,178 @@ class RK5MethodBuilder(SimpleButcherTableauMethodBuilder): # }}} +# {{{ implicit butcher tableau methods + + +class ImplicitButcherTableauMethodBuilder(ButcherTableauMethodBuilder): + def __init__(self, component_id, state_filter_name=None, + rhs_func_name=None): + super().__init__( + component_id=component_id, + state_filter_name=state_filter_name) + + if rhs_func_name is None: + rhs_func_name = ""+self.component_id + self.rhs_func_name = rhs_func_name + + def generate(self): + """ + :returns: :class:`dagrt.language.DAGCode` + """ + return self.generate_butcher( + stage_coeff_set_names=("implicit",), + stage_coeff_sets={ + "implicit": self.a_implicit}, + rhs_funcs={"implicit": var(self.rhs_func_name)}, + estimate_coeff_set_names=("main",), + estimate_coeff_sets={ + "main": self.output_coeffs, + }) + + +class BackwardEulerMethodBuilder(ImplicitButcherTableauMethodBuilder): + """ + .. automethod:: __init__ + .. automethod:: generate + """ + c = (1,) + + a_implicit = ( + (1,), + ) + + output_coeffs = (1,) + + recycle_last_stage_coeff_set_names = () + + +class DIRK2MethodBuilder(ImplicitButcherTableauMethodBuilder): + """ + Source: Kennedy & Carpenter: Diagonally Implicit Runge-Kutta + Methods for Ordinary Differential Equations. 
A Review + pp. 72, eqn 221 + + .. automethod:: __init__ + .. automethod:: generate + """ + + _x = (2 - np.sqrt(2))/2 + + c = (_x, 1) + + a_implicit = ( + (_x,), + (1 - _x, _x,), + ) + + output_coeffs = (1 - _x, _x) + + recycle_last_stage_coeff_set_names = () + + +class DIRK3MethodBuilder(ImplicitButcherTableauMethodBuilder): + """ + Source: Kennedy & Carpenter: Diagonally Implicit Runge-Kutta + Methods for Ordinary Differential Equations. A Review + pp. 77, eqn 229 & eqn 230 + + .. automethod:: __init__ + .. automethod:: generate + """ + + _x = 0.4358665215 + + c = (_x, (1 + _x)/2, 1) + + a_implicit = ( + (_x,), + ((1 - _x)/2, _x,), + (-3*_x**2/2 + 4*_x - 0.25, 3*_x**2/2 - 5*_x + 5/4, _x,), + ) + + output_coeffs = (-3*_x**2/2 + 4*_x - 0.25, 3*_x**2/2 - 5*_x + 5/4, _x) + + recycle_last_stage_coeff_set_names = () + + +class DIRK4MethodBuilder(ImplicitButcherTableauMethodBuilder): + """ + Source: Kennedy & Carpenter: Diagonally Implicit Runge-Kutta + Methods for Ordinary Differential Equations. A Review + pp. 78, eqn 232 + + .. automethod:: __init__ + .. automethod:: generate + """ + + _x1 = 1.06858 + + c = (_x1, 1/2, 1 - _x1) + + a_implicit = ( + (_x1,), + (1/2 - _x1, _x1,), + (2*_x1, 1 - 4*_x1, _x1), + ) + + output_coeffs = (1/(6*(1 - 2*_x1)**2), (3*(1 - 2*_x1)**2 - 1)/(3*(1 - 2*_x1)**2), + 1/(6*(1 - 2*_x1)**2)) + + recycle_last_stage_coeff_set_names = () + + +class DIRK5MethodBuilder(ImplicitButcherTableauMethodBuilder): + """ + Source: Kennedy & Carpenter: Diagonally Implicit Runge-Kutta + Methods for Ordinary Differential Equations. A Review + pp. 98, Table 24 + + .. automethod:: __init__ + .. automethod:: generate + """ + + c = (4024571134387/14474071345096, 5555633399575/5431021154178, + 5255299487392/12852514622453, 3/20, 10449500210709/14474071345096) + + a_implicit = ( + (4024571134387/14474071345096,), + (9365021263232/12572342979331, 4024571134387/14474071345096,), + (2144716224527/9320917548702, -397905335951/4008788611757, + 4024571134387/14474071345096,), + (-291541413000/6267936762551, 226761949132/4473940808273, + -1282248297070/9697416712681, 4024571134387/14474071345096,), + (-2481679516057/4626464057815, -197112422687/6604378783090, + 3952887910906/9713059315593, 4906835613583/8134926921134, + 4024571134387/14474071345096,), + ) + + output_coeffs = (-2522702558582/12162329469185, 1018267903655/12907234417901, + 4542392826351/13702606430957, 5001116467727/12224457745473, + 1509636094297/3891594770934) + + recycle_last_stage_coeff_set_names = () + + +IMPLICIT_ORDER_TO_RK_METHOD_BUILDER = { + 1: BackwardEulerMethodBuilder, + 2: DIRK2MethodBuilder, + 3: DIRK3MethodBuilder, + 4: DIRK4MethodBuilder, + 5: DIRK5MethodBuilder, + } + +# }}} + + # {{{ Embedded Runge-Kutta schemes base class + class EmbeddedButcherTableauMethodBuilder( - ButcherTableauMethodBuilder, TwoOrderAdaptiveMethodBuilderMixin): + ButcherTableauMethodBuilder, AdaptiveMethodBuilderMixin): """ User-supplied context: - + component_id: The value that is integrated - + component_id: The right hand side function + + component_id: The value that is integrated + + component_id: The right hand side function """ @property @@ -564,7 +758,7 @@ def __init__(self, component_id, use_high_order=True, state_filter_name=None, component_id=component_id, state_filter_name=state_filter_name) - TwoOrderAdaptiveMethodBuilderMixin.__init__( + AdaptiveMethodBuilderMixin.__init__( self, atol=atol, rtol=rtol, diff --git a/leap/rk/imex.py b/leap/rk/imex.py index 2eacb64..758bebd 100644 --- a/leap/rk/imex.py +++ b/leap/rk/imex.py @@ -2,7 
+2,7 @@ from pymbolic import var -from leap import TwoOrderAdaptiveMethodBuilderMixin +from leap import AdaptiveMethodBuilderMixin from leap.rk import ButcherTableauMethodBuilder @@ -42,7 +42,7 @@ class KennedyCarpenterIMEXRungeKuttaMethodBuilderBase( - TwoOrderAdaptiveMethodBuilderMixin, ButcherTableauMethodBuilder): + AdaptiveMethodBuilderMixin, ButcherTableauMethodBuilder): """ Christopher A. Kennedy, Mark H. Carpenter. Additive Runge-Kutta schemes for convection-diffusion-reaction equations. @@ -81,7 +81,7 @@ def __init__(self, component_id, use_high_order=True, state_filter_name=None, component_id=component_id, state_filter_name=state_filter_name) - TwoOrderAdaptiveMethodBuilderMixin.__init__( + AdaptiveMethodBuilderMixin.__init__( self, atol=atol, rtol=rtol, diff --git a/test/test_ab.py b/test/test_adams.py similarity index 73% rename from test/test_ab.py rename to test/test_adams.py index afea0f2..5e3fe73 100755 --- a/test/test_ab.py +++ b/test/test_adams.py @@ -28,7 +28,7 @@ import sys import pytest -from leap.multistep import AdamsBashforthMethodBuilder +from leap.multistep import AdamsBashforthMethodBuilder, AdamsMoultonMethodBuilder from utils import ( # noqa python_method_impl_interpreter as pmi_int, @@ -53,6 +53,24 @@ def test_ab_accuracy(python_method_impl, method, expected_order, plot_solution=plot_solution) +@pytest.mark.parametrize(("method", "expected_order"), [ + (AdamsMoultonMethodBuilder("y", order, static_dt=static_dt), order) + for order in [1, 3, 5] + for static_dt in [True, False] + ] + [ + (AdamsMoultonMethodBuilder("y", order, hist_length=order+1, + static_dt=static_dt), order) + for order in [1, 3, 5] + for static_dt in [True, False] + ]) +def test_am_accuracy(python_method_impl, method, expected_order, + show_dag=False, plot_solution=False): + from utils import check_simple_convergence + check_simple_convergence(method=method, method_impl=python_method_impl, + expected_order=expected_order, show_dag=show_dag, + plot_solution=plot_solution, implicit=True) + + if __name__ == "__main__": if len(sys.argv) > 1: exec(sys.argv[1]) diff --git a/test/test_embedded_adams.py b/test/test_embedded_adams.py new file mode 100755 index 0000000..15d6c23 --- /dev/null +++ b/test/test_embedded_adams.py @@ -0,0 +1,235 @@ +#! /usr/bin/env python + +__copyright__ = "Copyright (C) 2014 Andreas Kloeckner" + +__license__ = """ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
+""" + +# avoid spurious: pytest.mark.parametrize is not callable +# pylint: disable=not-callable + +import sys +import pytest + +from leap.multistep import EmbeddedAdamsMethodBuilder +import numpy as np + +import logging + +from utils import ( # noqa + python_method_impl_interpreter as pmi_int, + python_method_impl_codegen as pmi_cg) + +logger = logging.getLogger(__name__) + + +# {{{ non-adaptive test + +@pytest.mark.parametrize(("method", "expected_order", "implicit"), [ + (EmbeddedAdamsMethodBuilder("y", order=2, use_high_order=False, + implicit=True), 2, True), + (EmbeddedAdamsMethodBuilder("y", order=3, use_high_order=False, + implicit=True), 3, True), + (EmbeddedAdamsMethodBuilder("y", order=4, use_high_order=False, + implicit=True), 4, True), + ]) +def test_embedded_accuracy(python_method_impl, method, expected_order, implicit, + show_dag=False, plot_solution=False): + from utils import check_simple_convergence + check_simple_convergence(method=method, method_impl=python_method_impl, + expected_order=expected_order, show_dag=show_dag, + plot_solution=plot_solution, implicit=implicit) + +# }}} + +# {{{ adaptive test + + +def solver(f, t, sub_y, coeff, guess): + from scipy.optimize import root + return root(lambda unk: unk - f(t=t, y=sub_y + coeff*unk), guess).x + + +def solver_hook(solve_expr, solve_var, solver_id, guess): + from dagrt.expression import match, substitute + + pieces = match("unk - rhs(t=t, y=sub_y + coeff*unk)", solve_expr, + pre_match={"unk": solve_var}) + pieces["guess"] = guess + return substitute("solver(t, sub_y, coeff, guess)", pieces) + + +@pytest.mark.parametrize(("method", "ss_frac", "bs_frac"), [ + (EmbeddedAdamsMethodBuilder("y", order=2, rtol=1e-6, implicit=True), 0.5, 0.05), + (EmbeddedAdamsMethodBuilder("y", order=3, rtol=1e-6, implicit=True), 0.5, 0.01), + (EmbeddedAdamsMethodBuilder("y", order=4, rtol=1e-6, implicit=True), + 0.8, 0.0005), + ]) +def test_adaptive_timestep(python_method_impl, method, ss_frac, bs_frac, + show_dag=False, plot=False): + from utils import check_adaptive_timestep + check_adaptive_timestep(python_method_impl=python_method_impl, method=method, + ss_frac=ss_frac, bs_frac=bs_frac, ss_targ=0.01, + bs_targ=0.05, show_dag=show_dag, plot=plot, + implicit=True) + + +@pytest.mark.parametrize(("method", "expected_order"), [ + (EmbeddedAdamsMethodBuilder("y", order=2, rtol=1e-6, implicit=True), 2), + (EmbeddedAdamsMethodBuilder("y", order=3, rtol=1e-6, implicit=True), 3), + (EmbeddedAdamsMethodBuilder("y", order=4, rtol=1e-6, implicit=True), 4), + ]) +def test_adaptive_accuracy(method, expected_order, show_dag=False, + plot=False, python_method_impl=pmi_cg): + # Use "DEBUG" to trace execution + logging.basicConfig(level=logging.INFO) + + component_id = method.component_id + code = method.generate() + + from leap.implicit import replace_AssignImplicit + code = replace_AssignImplicit(code, {"solve": solver_hook}) + + code_nonadapt = EmbeddedAdamsMethodBuilder("y", order=expected_order, + implicit=True).generate() + code_nonadapt = replace_AssignImplicit(code_nonadapt, {"solve": solver_hook}) + + if show_dag: + from dagrt.language import show_dependency_graph + show_dependency_graph(code) + + from stiff_test_systems import VanDerPolProblem + example = VanDerPolProblem() + y = example.initial() + + from functools import partial + interp = python_method_impl(code, + function_map={"" + component_id: example, + "solver": partial(solver, example)}) + + interp_nonadapt = python_method_impl(code_nonadapt, + function_map={"" + component_id: 
example, + "solver": partial(solver, example)}) + + interp.set_up(t_start=example.t_start, dt_start=1e-5, context={component_id: y}) + + times = [] + values = [] + + new_times = [] + new_values = [] + + last_t = 0 + step_sizes = [] + nsteps = [] + istep = 0 + + # Initial run to establish step sizes. + for event in interp.run(t_end=example.t_end/10.0): + if isinstance(event, interp.StateComputed): + assert event.component_id == component_id + + new_values.append(event.state_component) + new_times.append(event.t) + elif isinstance(event, interp.StepCompleted): + if not new_times: + continue + + istep += 1 + step_sizes.append(event.t - last_t) + last_t = event.t + + times.extend(new_times) + values.extend(new_values) + del new_times[:] + del new_values[:] + elif isinstance(event, interp.StepFailed): + del new_times[:] + del new_values[:] + + logger.info("failed step at t=%s" % event.t) + + times = np.array(times) + values = np.array(values) + step_sizes = np.array(step_sizes) + nsteps = len(step_sizes) + final_time = times[-1] + final_val = values[-1] + end_vals = [] + end_vals.append(final_val) + dts = [] + dts.append(10.0/nsteps) + from pytools.convergence import EOCRecorder + eocrec = EOCRecorder() + for i in range(1, 5): + fac = 2**i + nsteps_run = fac*nsteps + dt = np.zeros(nsteps_run) + + for j in range(0, nsteps_run, fac): + dt[j] = step_sizes[int(j/fac)]/fac + for k in range(1, fac): + dt[j+k] = step_sizes[int(j/fac)]/fac + + # Now that we have our new set of timesteps, do the run, + # same as before, but with adaptivity turned off. + interp_nonadapt.set_up(t_start=example.t_start, dt_start=dt[0], + context={component_id: y}) + iout = 1 + for event in interp_nonadapt.run(t_end=final_time): + if isinstance(event, interp_nonadapt.StateComputed): + assert event.component_id == component_id + + end_val = event.state_component + elif isinstance(event, interp_nonadapt.StepCompleted): + if iout < nsteps*fac: + if event.t + dt[iout] >= final_time: + interp_nonadapt.dt = final_time - event.t + else: + interp_nonadapt.dt = dt[iout] + iout += 1 + else: + interp_nonadapt.dt = final_time - event.t + + end_vals.append(end_val) + dts.append(10.0/iout) + + # Now calculate errors using the final time as the + # true solution (self-convergence) + for i in range(1, 5): + eocrec.add_data_point(dts[i-1], + np.linalg.norm(end_vals[i-1] - end_vals[-1])) + + print(eocrec.pretty_print()) + orderest = eocrec.estimate_order_of_convergence()[0, 1] + assert orderest > 0.9 * expected_order + + +# }}} + + +if __name__ == "__main__": + if len(sys.argv) > 1: + exec(sys.argv[1]) + else: + from pytest import main + main([__file__]) + +# vim: filetype=pyopencl:fdm=marker diff --git a/test/test_embedded_adams_explicit.py b/test/test_embedded_adams_explicit.py new file mode 100755 index 0000000..accfdeb --- /dev/null +++ b/test/test_embedded_adams_explicit.py @@ -0,0 +1,219 @@ +#! 
/usr/bin/env python + +__copyright__ = "Copyright (C) 2014 Andreas Kloeckner" + +__license__ = """ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +""" + +# avoid spurious: pytest.mark.parametrize is not callable +# pylint: disable=not-callable + +import sys +import pytest + +from leap.multistep import EmbeddedAdamsMethodBuilder +import numpy as np + +import logging + +from utils import ( # noqa + python_method_impl_interpreter as pmi_int, + python_method_impl_codegen as pmi_cg) + +logger = logging.getLogger(__name__) + + +# {{{ non-adaptive test + +@pytest.mark.parametrize(("method", "expected_order", "implicit"), [ + (EmbeddedAdamsMethodBuilder("y", order=2, use_high_order=False, + implicit=False), 2, False), + (EmbeddedAdamsMethodBuilder("y", order=3, use_high_order=False, + implicit=False), 3, False), + (EmbeddedAdamsMethodBuilder("y", order=4, use_high_order=False, + implicit=False), 4, False), + ]) +def test_embedded_accuracy(python_method_impl, method, expected_order, implicit, + show_dag=False, plot_solution=False): + from utils import check_simple_convergence + # Order 2 is persnickety and barely fails its convergence test + dts = 2 ** -np.array(range(5, 8), dtype=np.float64) + check_simple_convergence(method=method, method_impl=python_method_impl, + expected_order=expected_order, show_dag=show_dag, + plot_solution=plot_solution, implicit=implicit, + dts=dts) + +# }}} + +# {{{ adaptive test + + +@pytest.mark.parametrize(("method", "ss_frac", "ss_targ", "bs_frac", "bs_targ"), [ + (EmbeddedAdamsMethodBuilder("y", order=2, rtol=1e-6, + implicit=False), 0.4, 0.005, 0.15, 0.01), + (EmbeddedAdamsMethodBuilder("y", order=3, rtol=1e-6, + implicit=False), 0.35, 0.001, 0.16, 0.005), + (EmbeddedAdamsMethodBuilder("y", order=4, rtol=1e-6, + implicit=False), 0.35, 0.0001, 0.16, 0.001), + ]) +def test_adaptive_timestep(python_method_impl, method, ss_frac, ss_targ, bs_frac, + bs_targ, show_dag=False, plot=False): + from utils import check_adaptive_timestep + check_adaptive_timestep(python_method_impl=python_method_impl, method=method, + ss_frac=ss_frac, bs_frac=bs_frac, ss_targ=ss_targ, + bs_targ=bs_targ, show_dag=show_dag, plot=plot, + implicit=False) + + +@pytest.mark.parametrize(("method", "expected_order"), [ + (EmbeddedAdamsMethodBuilder("y", order=2, rtol=1e-6, implicit=False), 2), + (EmbeddedAdamsMethodBuilder("y", order=3, rtol=1e-6, implicit=False), 3), + (EmbeddedAdamsMethodBuilder("y", order=4, rtol=1e-6, implicit=False), 4), + ]) +def test_adaptive_accuracy(method, expected_order, show_dag=False, + plot=False, 
python_method_impl=pmi_cg): + # Use "DEBUG" to trace execution + logging.basicConfig(level=logging.INFO) + + component_id = method.component_id + code = method.generate() + + code_nonadapt = EmbeddedAdamsMethodBuilder("y", order=expected_order, + implicit=False).generate() + + if show_dag: + from dagrt.language import show_dependency_graph + show_dependency_graph(code) + + from stiff_test_systems import VanDerPolProblem + example = VanDerPolProblem() + y = example.initial() + + interp = python_method_impl(code, + function_map={"" + component_id: example}) + + interp_nonadapt = python_method_impl(code_nonadapt, + function_map={"" + component_id: example}) + + interp.set_up(t_start=example.t_start, dt_start=1e-5, context={component_id: y}) + + times = [] + values = [] + + new_times = [] + new_values = [] + + last_t = 0 + step_sizes = [] + nsteps = [] + istep = 0 + + # Initial run to establish step sizes. + for event in interp.run(t_end=example.t_end/10.0): + if isinstance(event, interp.StateComputed): + assert event.component_id == component_id + + new_values.append(event.state_component) + new_times.append(event.t) + elif isinstance(event, interp.StepCompleted): + if not new_times: + continue + + istep += 1 + step_sizes.append(event.t - last_t) + last_t = event.t + + times.extend(new_times) + values.extend(new_values) + del new_times[:] + del new_values[:] + elif isinstance(event, interp.StepFailed): + del new_times[:] + del new_values[:] + + logger.info("failed step at t=%s" % event.t) + + times = np.array(times) + values = np.array(values) + step_sizes = np.array(step_sizes) + nsteps = len(step_sizes) + final_time = times[-1] + final_val = values[-1] + end_vals = [] + end_vals.append(final_val) + dts = [] + dts.append(10.0/nsteps) + from pytools.convergence import EOCRecorder + eocrec = EOCRecorder() + for i in range(1, 5): + fac = 2**i + nsteps_run = fac*nsteps + dt = np.zeros(nsteps_run) + + for j in range(0, nsteps_run, fac): + dt[j] = step_sizes[int(j/fac)]/fac + for k in range(1, fac): + dt[j+k] = step_sizes[int(j/fac)]/fac + + # Now that we have our new set of timesteps, do the run, + # same as before, but with adaptivity turned off. 
+
+        interp_nonadapt.set_up(t_start=example.t_start, dt_start=dt[0],
+                               context={component_id: y})
+        iout = 1
+        for event in interp_nonadapt.run(t_end=final_time):
+            if isinstance(event, interp_nonadapt.StateComputed):
+                assert event.component_id == component_id
+
+                end_val = event.state_component
+            elif isinstance(event, interp_nonadapt.StepCompleted):
+                if iout < nsteps*fac:
+                    if event.t + dt[iout] >= final_time:
+                        interp_nonadapt.dt = final_time - event.t
+                    else:
+                        interp_nonadapt.dt = dt[iout]
+                    iout += 1
+                else:
+                    interp_nonadapt.dt = final_time - event.t
+
+        end_vals.append(end_val)
+        dts.append(10.0/iout)
+
+    # Now calculate errors, using the most-refined run as the
+    # "true" solution (self-convergence)
+    for i in range(1, 5):
+        eocrec.add_data_point(dts[i-1],
+                              np.linalg.norm(end_vals[i-1] - end_vals[-1]))
+
+    print(eocrec.pretty_print())
+    orderest = eocrec.estimate_order_of_convergence()[0, 1]
+    assert orderest > 0.9 * expected_order
+
+
+# }}}
+
+
+if __name__ == "__main__":
+    if len(sys.argv) > 1:
+        exec(sys.argv[1])
+    else:
+        from pytest import main
+        main([__file__])
+
+# vim: filetype=pyopencl:fdm=marker
diff --git a/test/test_rk.py b/test/test_rk.py
index 6786476..4d5f8c0 100755
--- a/test/test_rk.py
+++ b/test/test_rk.py
@@ -35,9 +35,11 @@
     RK3MethodBuilder, RK4MethodBuilder, RK5MethodBuilder,
     LSRK4MethodBuilder,
     SSPRK22MethodBuilder, SSPRK33MethodBuilder,
+    BackwardEulerMethodBuilder, DIRK2MethodBuilder,
+    DIRK3MethodBuilder, DIRK4MethodBuilder,
+    DIRK5MethodBuilder,
     )
 from leap.rk.imex import KennedyCarpenterIMEXARK4MethodBuilder
-import numpy as np
 
 import logging
 
@@ -77,6 +79,21 @@ def test_rk_accuracy(python_method_impl, method, expected_order,
         expected_order=expected_order, show_dag=show_dag,
         plot_solution=plot_solution)
 
+
+@pytest.mark.parametrize(("method", "expected_order"), [
+    (BackwardEulerMethodBuilder("y"), 1),
+    (DIRK2MethodBuilder("y"), 2),
+    (DIRK3MethodBuilder("y"), 3),
+    (DIRK4MethodBuilder("y"), 4),
+    (DIRK5MethodBuilder("y"), 5),
+    ])
+def test_implicit_rk_accuracy(python_method_impl, method, expected_order,
+                              show_dag=False, plot_solution=False):
+    from utils import check_simple_convergence
+    check_simple_convergence(method=method, method_impl=python_method_impl,
+                             expected_order=expected_order, show_dag=show_dag,
+                             plot_solution=plot_solution, implicit=True)
+
 # }}}
 
 
@@ -90,77 +107,11 @@ def test_rk_accuracy(python_method_impl, method, expected_order,
     ])
 def test_adaptive_timestep(python_method_impl, method, show_dag=False,
                            plot=False):
-    # Use "DEBUG" to trace execution
-    logging.basicConfig(level=logging.INFO)
-
-    component_id = method.component_id
-    code = method.generate()
-    print(code)
-    #1/0
-
-    if show_dag:
-        from dagrt.language import show_dependency_graph
-        show_dependency_graph(code)
-
-    from stiff_test_systems import VanDerPolProblem
-    example = VanDerPolProblem()
-    y = example.initial()
-
-    interp = python_method_impl(code,
-            function_map={"<func>" + component_id: example})
-    interp.set_up(t_start=example.t_start, dt_start=1e-5, context={component_id: y})
-
-    times = []
-    values = []
-
-    new_times = []
-    new_values = []
-
-    last_t = 0
-    step_sizes = []
-
-    for event in interp.run(t_end=example.t_end):
-        if isinstance(event, interp.StateComputed):
-            assert event.component_id == component_id
-
-            new_values.append(event.state_component)
-            new_times.append(event.t)
-        elif isinstance(event, interp.StepCompleted):
-            if not new_times:
-                continue
-
-            step_sizes.append(event.t - last_t)
-            last_t = event.t
-
-            times.extend(new_times)
-            values.extend(new_values)
-            del new_times[:]
-            del new_values[:]
-        elif isinstance(event, interp.StepFailed):
-            del new_times[:]
-            del new_values[:]
-
-            logger.info("failed step at t=%s" % event.t)
-
-    times = np.array(times)
-    values = np.array(values)
-    step_sizes = np.array(step_sizes)
-
-    if plot:
-        import matplotlib.pyplot as pt
-        pt.plot(times, values[:, 1], "x-")
-        pt.show()
-        pt.plot(times, step_sizes, "x-")
-        pt.show()
-
-    step_sizes = np.array(step_sizes)
-    small_step_frac = len(np.nonzero(step_sizes < 0.01)[0]) / len(step_sizes)
-    big_step_frac = len(np.nonzero(step_sizes > 0.05)[0]) / len(step_sizes)
-
-    print("small_step_frac (<0.01): %g - big_step_frac (>.05): %g"
-            % (small_step_frac, big_step_frac))
-    assert small_step_frac <= 0.35, small_step_frac
-    assert big_step_frac >= 0.16, big_step_frac
+    from utils import check_adaptive_timestep
+    check_adaptive_timestep(python_method_impl=python_method_impl, method=method,
+                            ss_frac=0.36, bs_frac=0.16, ss_targ=0.01,
+                            bs_targ=0.05, show_dag=show_dag, plot=plot,
+                            implicit=False)
 
 # }}}
 
diff --git a/test/utils.py b/test/utils.py
index 60d3ac3..8895214 100644
--- a/test/utils.py
+++ b/test/utils.py
@@ -21,6 +21,9 @@
 """
 
 import numpy as np
+import logging
+
+logger = logging.getLogger(__name__)
 
 
 # {{{ things to pass for python_method_impl
@@ -38,6 +41,20 @@ def python_method_impl_codegen(code, **kwargs):
 
 # }}}
 
 
+def solver(f, t, sub_y, coeff, guess):
+    from scipy.optimize import root
+    return root(lambda unk: unk - f(t=t, y=sub_y + coeff*unk), guess).x
+
+
+def solver_hook(solve_expr, solve_var, solver_id, guess):
+    from dagrt.expression import match, substitute
+
+    pieces = match("unk - <func>rhs(t=t, y=sub_y + coeff*unk)", solve_expr,
+                   pre_match={"unk": solve_var})
+    pieces["guess"] = guess
+    return substitute("<func>solver(t, sub_y, coeff, guess)", pieces)
+
+
 def execute_and_return_single_result(python_method_impl, code, initial_context=None,
                                      max_steps=1):
     if initial_context is None:
@@ -97,18 +114,22 @@ def __call__(self, t, y):
 
 def check_simple_convergence(method, method_impl, expected_order,
                              problem=None, dts=_default_dts,
-                             show_dag=False, plot_solution=False):
+                             show_dag=False, plot_solution=False, implicit=False):
     if problem is None:
         problem = DefaultProblem()
 
     component_id = method.component_id
     code = method.generate()
-    print(code)
+    #print(code)
 
     if show_dag:
         from dagrt.language import show_dependency_graph
         show_dependency_graph(code)
 
+    if implicit:
+        from leap.implicit import replace_AssignImplicit
+        code = replace_AssignImplicit(code, {"solve": solver_hook})
+
     from pytools.convergence import EOCRecorder
     eocrec = EOCRecorder()
 
@@ -117,9 +138,16 @@ def check_simple_convergence(method, method_impl, expected_order,
         y = problem.initial()
         final_t = problem.t_end
 
-        interp = method_impl(code, function_map={
-            "<func>" + component_id: problem,
-            })
+        if implicit:
+            from functools import partial
+            interp = method_impl(code, function_map={
+                "<func>" + component_id: problem,
+                "<func>solver": partial(solver, problem),
+                })
+        else:
+            interp = method_impl(code, function_map={
+                "<func>" + component_id: problem,
+                })
 
         interp.set_up(t_start=t, dt_start=dt, context={component_id: y})
 
         times = []
@@ -150,7 +178,94 @@ def check_simple_convergence(method, method_impl, expected_order,
 
     print(eocrec.pretty_print())
     orderest = eocrec.estimate_order_of_convergence()[0, 1]
-    assert orderest > expected_order * 0.9
+    assert orderest > expected_order * 0.89
+
+
+def check_adaptive_timestep(python_method_impl, method, ss_frac, bs_frac,
+                            ss_targ, bs_targ, show_dag=False, plot=False,
+                            implicit=False):
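+    # ss_targ and bs_targ are step-size thresholds; the assertions below
+    # require that at most a fraction ss_frac of the accepted steps fall
+    # below ss_targ and that at least a fraction bs_frac exceed bs_targ.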
+    # Use "DEBUG" to trace execution
+    logging.basicConfig(level=logging.INFO)
+
+    component_id = method.component_id
+    code = method.generate()
+    #print(code)
+    #1/0
+
+    if implicit:
+        from leap.implicit import replace_AssignImplicit
+        code = replace_AssignImplicit(code, {"solve": solver_hook})
+
+    if show_dag:
+        from dagrt.language import show_dependency_graph
+        show_dependency_graph(code)
+
+    from stiff_test_systems import VanDerPolProblem
+    example = VanDerPolProblem()
+    y = example.initial()
+
+    if implicit:
+        from functools import partial
+        interp = python_method_impl(code,
+                function_map={"<func>" + component_id: example,
+                              "<func>solver": partial(solver, example)})
+    else:
+        interp = python_method_impl(code,
+                function_map={"<func>" + component_id: example})
+    interp.set_up(t_start=example.t_start, dt_start=1e-5, context={component_id: y})
+
+    times = []
+    values = []
+
+    new_times = []
+    new_values = []
+
+    last_t = 0
+    step_sizes = []
+
+    for event in interp.run(t_end=example.t_end/4.0):
+        if isinstance(event, interp.StateComputed):
+            assert event.component_id == component_id
+
+            new_values.append(event.state_component)
+            new_times.append(event.t)
+        elif isinstance(event, interp.StepCompleted):
+            if not new_times:
+                continue
+
+            step_sizes.append(event.t - last_t)
+            last_t = event.t
+
+            times.extend(new_times)
+            values.extend(new_values)
+            del new_times[:]
+            del new_values[:]
+        elif isinstance(event, interp.StepFailed):
+            del new_times[:]
+            del new_values[:]
+
+            logger.info("failed step at t=%s" % event.t)
+
+    times = np.array(times)
+    values = np.array(values)
+    step_sizes = np.array(step_sizes)
+
+    if plot:
+        import matplotlib.pyplot as pt
+        pt.clf()
+        pt.plot(times, values[:, 1], "x-")
+        pt.show()
+        pt.plot(times, step_sizes, "x-")
+        pt.show()
+
+    small_step_frac = len(np.nonzero(step_sizes < ss_targ)[0]) / len(step_sizes)
+    big_step_frac = len(np.nonzero(step_sizes > bs_targ)[0]) / len(step_sizes)
+
+    print("small_step_frac (<%g): %g - big_step_frac (>%g): %g"
+          % (ss_targ, small_step_frac, bs_targ, big_step_frac))
+    assert small_step_frac <= ss_frac, small_step_frac
+    assert big_step_frac >= bs_frac, big_step_frac
 
 # vim: foldmethod=marker