From 97287d42a7a9859c1d511210f61137e74b687367 Mon Sep 17 00:00:00 2001 From: shudson Date: Tue, 9 Dec 2025 19:48:47 -0600 Subject: [PATCH 01/11] Ensure gens with _id return with sim_id --- libensemble/manager.py | 13 ++++++++++++- libensemble/utils/misc.py | 9 +++++++-- 2 files changed, 19 insertions(+), 3 deletions(-) diff --git a/libensemble/manager.py b/libensemble/manager.py index 97f8f8225..22ae8b5d3 100644 --- a/libensemble/manager.py +++ b/libensemble/manager.py @@ -410,6 +410,14 @@ def _freeup_resources(self, w: int) -> None: if self.resources: self.resources.resource_manager.free_rsets(w) + def _ensure_sim_id_in_persis_in(self, D: npt.NDArray) -> None: + """Add sim_id to gen_specs persis_in if generator output contains sim_id (gest-api style generators only)""" + if self.gen_specs.get("generator") and len(D) > 0 and "sim_id" in D.dtype.names: + if "persis_in" not in self.gen_specs: + self.gen_specs["persis_in"] = [] + if "sim_id" not in self.gen_specs["persis_in"]: + self.gen_specs["persis_in"].append("sim_id") + def _send_work_order(self, Work: dict, w: int) -> None: """Sends an allocation function order to a worker""" logger.debug(f"Manager sending work unit to worker {w}") @@ -483,6 +491,7 @@ def _update_state_on_worker_msg(self, persis_info: dict, D_recv: dict, w: int) - final_data = D_recv.get("calc_out", None) if isinstance(final_data, np.ndarray): if calc_status is FINISHED_PERSISTENT_GEN_TAG and self.libE_specs.get("use_persis_return_gen", False): + self._ensure_sim_id_in_persis_in(final_data) self.hist.update_history_x_in(w, final_data, self.W[w]["gen_started_time"]) elif calc_status is FINISHED_PERSISTENT_SIM_TAG and self.libE_specs.get("use_persis_return_sim", False): self.hist.update_history_f(D_recv, self.kill_canceled_sims) @@ -500,7 +509,9 @@ def _update_state_on_worker_msg(self, persis_info: dict, D_recv: dict, w: int) - if calc_type == EVAL_SIM_TAG: self.hist.update_history_f(D_recv, self.kill_canceled_sims) if calc_type == EVAL_GEN_TAG: - self.hist.update_history_x_in(w, D_recv["calc_out"], self.W[w]["gen_started_time"]) + D = D_recv["calc_out"] + self._ensure_sim_id_in_persis_in(D) + self.hist.update_history_x_in(w, D, self.W[w]["gen_started_time"]) assert ( len(D_recv["calc_out"]) or np.any(self.W["active"]) or self.W[w]["persis_state"] ), "Gen must return work when is is the only thing active and not persistent." 
diff --git a/libensemble/utils/misc.py b/libensemble/utils/misc.py index 47823e281..74be3a5da 100644 --- a/libensemble/utils/misc.py +++ b/libensemble/utils/misc.py @@ -130,6 +130,9 @@ def list_dicts_to_np(list_dicts: list, dtype: list = None, mapping: dict = {}) - if not isinstance(list_dicts, list): # presumably already a numpy array, conversion not necessary return list_dicts + if "sim_id" not in mapping: + mapping["sim_id"] = ["_id"] + # first entry is used to determine dtype first = list_dicts[0] @@ -148,9 +151,11 @@ def list_dicts_to_np(list_dicts: list, dtype: list = None, mapping: dict = {}) - # append dtype of mapped float fields if len(mapping): + existing_names = [f[0] for f in dtype] for name in mapping: - size = len(mapping[name]) - dtype.append(_decide_dtype(name, 0.0, size)) # float + if name not in existing_names: + size = len(mapping[name]) + dtype.append(_decide_dtype(name, 0.0, size)) # float out = np.zeros(len(list_dicts), dtype=dtype) From d78856bf351babf71bec2e7c9c687e18ad3c2f40 Mon Sep 17 00:00:00 2001 From: shudson Date: Tue, 9 Dec 2025 19:51:29 -0600 Subject: [PATCH 02/11] Add Optimas Ax test --- .../tests/regression_tests/test_optimas_ax.py | 113 ++++++++++++++++++ 1 file changed, 113 insertions(+) create mode 100644 libensemble/tests/regression_tests/test_optimas_ax.py diff --git a/libensemble/tests/regression_tests/test_optimas_ax.py b/libensemble/tests/regression_tests/test_optimas_ax.py new file mode 100644 index 000000000..f29ff9e2c --- /dev/null +++ b/libensemble/tests/regression_tests/test_optimas_ax.py @@ -0,0 +1,113 @@ +""" +Tests libEnsemble with Optimas AxGenerators + +*****curerntly using same sim as xopt - seeing if just swap out gens***** + +*****currently fixing nworkers to batch_size***** + +Execute via one of the following commands (e.g. 4 workers): + mpiexec -np 5 python test_optimas_ax.py + python test_optimas_ax.py -n 4 + +When running with the above commands, the number of concurrent evaluations of +the objective function will be 4 as the generator is on the manager. + +""" + +# Do not change these lines - they are parsed by run-tests.sh +# TESTSUITE_COMMS: mpi local +# TESTSUITE_NPROCS: 4 +# TESTSUITE_EXTRA: true + +import pdb_si +import numpy as np +from gest_api.vocs import VOCS + +from optimas.core import Task +from optimas.generators import ( + AxSingleFidelityGenerator, + AxMultiFidelityGenerator, + AxMultitaskGenerator, + AxClientGenerator, +) + +from libensemble import Ensemble +from libensemble.alloc_funcs.start_only_persistent import only_persistent_gens as alloc_f +from libensemble.specs import AllocSpecs, ExitCriteria, GenSpecs, LibeSpecs, SimSpecs + + +# SH TODO - should check constant1 is present +# Adapted from Xopt/xopt/resources/testing.py +def xtest_sim(H, persis_info, sim_specs, _): + """ + Simple sim function that takes x1, x2, constant1 from H and returns y1, c1. + Logic: y1 = x2, c1 = x1 + """ + batch = len(H) + H_o = np.zeros(batch, dtype=sim_specs["out"]) + + for i in range(batch): + x1 = H["x1"][i] + x2 = H["x2"][i] + # constant1 is available but not used in the calculation + + H_o["y1"][i] = x2 + H_o["c1"][i] = x1 + + return H_o, persis_info + + +# Main block is necessary only when using local comms with spawn start method (default on macOS and Windows). 
+if __name__ == "__main__": + + n = 2 + batch_size = 4 + + libE_specs = LibeSpecs(gen_on_manager=True, nworkers=batch_size) + + vocs = VOCS( + variables={"x1": [0, 1.0], "x2": [0, 10.0], + # "trial_type": {"task_1", "task_2"} + }, + objectives={"y1": "MINIMIZE"}, + constraints={"c1": ["GREATER_THAN", 0.5]}, + constants={"constant1": 1.0}, # SH DO I WNAT THIS... - see optimas tests + ) + + # **TODO first 2 get the sim_id issue (fixed on other branch - but may want to change to not use _id..) + gen = AxSingleFidelityGenerator(vocs=vocs) + # gen = AxMultiFidelityGenerator(vocs=vocs) + + # task1 = Task("task_1", n_init=2, n_opt=1) + # task2 = Task("task_2", n_init=5, n_opt=3) + # gen = AxMultitaskGenerator(vocs=vocs) + + gen_specs = GenSpecs( + generator=gen, + batch_size=batch_size, + vocs=vocs, + ) + + sim_specs = SimSpecs( + sim_f=xtest_sim, + vocs=vocs, + ) + + alloc_specs = AllocSpecs(alloc_f=alloc_f) + exit_criteria = ExitCriteria(sim_max=20) + + workflow = Ensemble( + libE_specs=libE_specs, + sim_specs=sim_specs, + alloc_specs=alloc_specs, + gen_specs=gen_specs, + exit_criteria=exit_criteria, + ) + + H, _, _ = workflow.run() + + # Perform the run + if workflow.is_manager: + print(f"Completed {len(H)} simulations") + assert np.array_equal(H["y1"], H["x2"]) + assert np.array_equal(H["c1"], H["x1"]) From c60ad273cf266ea44209c4b3717f6b31423e1783 Mon Sep 17 00:00:00 2001 From: shudson Date: Thu, 11 Dec 2025 20:24:24 -0600 Subject: [PATCH 03/11] Add working multi-task test --- .../tests/regression_tests/test_optimas_ax.py | 67 +++++++++---------- 1 file changed, 30 insertions(+), 37 deletions(-) diff --git a/libensemble/tests/regression_tests/test_optimas_ax.py b/libensemble/tests/regression_tests/test_optimas_ax.py index f29ff9e2c..dc8e56d71 100644 --- a/libensemble/tests/regression_tests/test_optimas_ax.py +++ b/libensemble/tests/regression_tests/test_optimas_ax.py @@ -1,7 +1,5 @@ """ -Tests libEnsemble with Optimas AxGenerators - -*****curerntly using same sim as xopt - seeing if just swap out gens***** +Tests libEnsemble with Optimas Ax Generators *****currently fixing nworkers to batch_size***** @@ -19,7 +17,6 @@ # TESTSUITE_NPROCS: 4 # TESTSUITE_EXTRA: true -import pdb_si import numpy as np from gest_api.vocs import VOCS @@ -36,65 +33,61 @@ from libensemble.specs import AllocSpecs, ExitCriteria, GenSpecs, LibeSpecs, SimSpecs -# SH TODO - should check constant1 is present -# Adapted from Xopt/xopt/resources/testing.py -def xtest_sim(H, persis_info, sim_specs, _): - """ - Simple sim function that takes x1, x2, constant1 from H and returns y1, c1. - Logic: y1 = x2, c1 = x1 - """ - batch = len(H) - H_o = np.zeros(batch, dtype=sim_specs["out"]) - - for i in range(batch): - x1 = H["x1"][i] - x2 = H["x2"][i] - # constant1 is available but not used in the calculation +def eval_func_multitask(input_params): + """Evaluation function for task1 or task2 in multitask test""" + print(f'input_params: {input_params}') + x0 = input_params["x0"] + x1 = input_params["x1"] + trial_type = input_params["trial_type"] - H_o["y1"][i] = x2 - H_o["c1"][i] = x1 + if trial_type == "task_1": + result = -(x0 + 10 * np.cos(x0)) * (x1 + 5 * np.cos(x1)) + else: + result = -0.5 * (x0 + 10 * np.cos(x0)) * (x1 + 5 * np.cos(x1)) - return H_o, persis_info + output_params = {} + output_params["f"] = result + return output_params # Main block is necessary only when using local comms with spawn start method (default on macOS and Windows). 
if __name__ == "__main__": n = 2 - batch_size = 4 + batch_size = 2 libE_specs = LibeSpecs(gen_on_manager=True, nworkers=batch_size) vocs = VOCS( - variables={"x1": [0, 1.0], "x2": [0, 10.0], - # "trial_type": {"task_1", "task_2"} - }, - objectives={"y1": "MINIMIZE"}, - constraints={"c1": ["GREATER_THAN", 0.5]}, - constants={"constant1": 1.0}, # SH DO I WNAT THIS... - see optimas tests + variables={ + "x0": [-50.0, 5.0], + "x1": [-5.0, 15.0], + "trial_type": {"task_1", "task_2"}, + }, + objectives={"f": "MAXIMIZE"}, ) - # **TODO first 2 get the sim_id issue (fixed on other branch - but may want to change to not use _id..) - gen = AxSingleFidelityGenerator(vocs=vocs) + # gen = AxSingleFidelityGenerator(vocs=vocs) # gen = AxMultiFidelityGenerator(vocs=vocs) - # task1 = Task("task_1", n_init=2, n_opt=1) - # task2 = Task("task_2", n_init=5, n_opt=3) - # gen = AxMultitaskGenerator(vocs=vocs) + task1 = Task("task_1", n_init=2, n_opt=1) + task2 = Task("task_2", n_init=5, n_opt=3) + gen = AxMultitaskGenerator(vocs=vocs, hifi_task=task1, lofi_task=task2) gen_specs = GenSpecs( generator=gen, + # init_batch_size=5, # fist want to see why doesn't work though batch_size=batch_size, vocs=vocs, ) sim_specs = SimSpecs( - sim_f=xtest_sim, + simulator=eval_func_multitask, vocs=vocs, ) alloc_specs = AllocSpecs(alloc_f=alloc_f) - exit_criteria = ExitCriteria(sim_max=20) + exit_criteria = ExitCriteria(sim_max=15) workflow = Ensemble( libE_specs=libE_specs, @@ -108,6 +101,6 @@ def xtest_sim(H, persis_info, sim_specs, _): # Perform the run if workflow.is_manager: + workflow.save_output(__file__) print(f"Completed {len(H)} simulations") - assert np.array_equal(H["y1"], H["x2"]) - assert np.array_equal(H["c1"], H["x1"]) + From 1e341b9c7f342415f32889dcb9293ed2371c009f Mon Sep 17 00:00:00 2001 From: shudson Date: Thu, 11 Dec 2025 20:32:01 -0600 Subject: [PATCH 04/11] Target Optimas branch for testing --- .github/workflows/extra.yml | 2 +- libensemble/tests/regression_tests/test_optimas_ax.py | 8 +++----- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/.github/workflows/extra.yml b/.github/workflows/extra.yml index f024a08ea..cdc399644 100644 --- a/.github/workflows/extra.yml +++ b/.github/workflows/extra.yml @@ -103,7 +103,7 @@ jobs: conda install numpy scipy conda install -c conda-forge pytorch-cpu pip install --upgrade-strategy=only-if-needed git+https://github.com/xopt-org/xopt.git@generator_standard - pip install --no-deps git+https://github.com/optimas-org/optimas.git@main + pip install --no-deps git+https://github.com/optimas-org/optimas.git@multitask_uses_id - name: Remove test using octave, gpcam on Python 3.13 if: matrix.python-version >= '3.13' diff --git a/libensemble/tests/regression_tests/test_optimas_ax.py b/libensemble/tests/regression_tests/test_optimas_ax.py index dc8e56d71..66530604a 100644 --- a/libensemble/tests/regression_tests/test_optimas_ax.py +++ b/libensemble/tests/regression_tests/test_optimas_ax.py @@ -22,10 +22,9 @@ from optimas.core import Task from optimas.generators import ( - AxSingleFidelityGenerator, - AxMultiFidelityGenerator, + # AxSingleFidelityGenerator, + # AxMultiFidelityGenerator, AxMultitaskGenerator, - AxClientGenerator, ) from libensemble import Ensemble @@ -69,7 +68,7 @@ def eval_func_multitask(input_params): # gen = AxSingleFidelityGenerator(vocs=vocs) # gen = AxMultiFidelityGenerator(vocs=vocs) - + task1 = Task("task_1", n_init=2, n_opt=1) task2 = Task("task_2", n_init=5, n_opt=3) gen = AxMultitaskGenerator(vocs=vocs, hifi_task=task1, lofi_task=task2) @@ 
-103,4 +102,3 @@ def eval_func_multitask(input_params): if workflow.is_manager: workflow.save_output(__file__) print(f"Completed {len(H)} simulations") - From a79eb566a0380d1859d9e8c5845bb2999b1a712f Mon Sep 17 00:00:00 2001 From: shudson Date: Thu, 11 Dec 2025 20:56:14 -0600 Subject: [PATCH 05/11] Add tests with single/multi fidelity and multitask gens --- .../regression_tests/test_optimas_ax_mf.py | 85 +++++++++++++++++ .../test_optimas_ax_multitask.py | 95 +++++++++++++++++++ .../regression_tests/test_optimas_ax_sf.py | 85 +++++++++++++++++ 3 files changed, 265 insertions(+) create mode 100644 libensemble/tests/regression_tests/test_optimas_ax_mf.py create mode 100644 libensemble/tests/regression_tests/test_optimas_ax_multitask.py create mode 100644 libensemble/tests/regression_tests/test_optimas_ax_sf.py diff --git a/libensemble/tests/regression_tests/test_optimas_ax_mf.py b/libensemble/tests/regression_tests/test_optimas_ax_mf.py new file mode 100644 index 000000000..d46776403 --- /dev/null +++ b/libensemble/tests/regression_tests/test_optimas_ax_mf.py @@ -0,0 +1,85 @@ +""" +Tests libEnsemble with Optimas Multi-Fidelity Ax Generator + +*****currently fixing nworkers to batch_size***** + +Execute via one of the following commands (e.g. 4 workers): + mpiexec -np 5 python test_optimas_ax_mf.py + python test_optimas_ax_mf.py -n 4 + +When running with the above commands, the number of concurrent evaluations of +the objective function will be 4 as the generator is on the manager. + +""" + +# Do not change these lines - they are parsed by run-tests.sh +# TESTSUITE_COMMS: mpi local +# TESTSUITE_NPROCS: 4 +# TESTSUITE_EXTRA: true + +import numpy as np +from gest_api.vocs import VOCS + +from optimas.core import Task +from optimas.generators import AxMultiFidelityGenerator + +from libensemble import Ensemble +from libensemble.alloc_funcs.start_only_persistent import only_persistent_gens as alloc_f +from libensemble.specs import AllocSpecs, ExitCriteria, GenSpecs, LibeSpecs, SimSpecs + + +def eval_func_mf(input_params): + """Evaluation function for multifidelity test.""" + x0 = input_params["x0"] + x1 = input_params["x1"] + resolution = input_params["res"] + result = -( + (x0 + 10 * np.cos(x0 + 0.1 * resolution)) + * (x1 + 5 * np.cos(x1 - 0.2 * resolution)) + ) + return {"f": result} + + +# Main block is necessary only when using local comms with spawn start method (default on macOS and Windows). 
+if __name__ == "__main__":
+
+    n = 2
+    batch_size = 2
+
+    libE_specs = LibeSpecs(gen_on_manager=True, nworkers=batch_size)
+
+    vocs = VOCS(
+        variables={"x0": [-50.0, 5.0], "x1": [-5.0, 15.0], "res": [1.0, 8.0]},
+        objectives={"f": "MAXIMIZE"},
+    )
+
+    gen = AxMultiFidelityGenerator(vocs=vocs)
+
+    gen_specs = GenSpecs(
+        generator=gen,
+        batch_size=batch_size,
+        vocs=vocs,
+    )
+
+    sim_specs = SimSpecs(
+        simulator=eval_func_mf,
+        vocs=vocs,
+    )
+
+    alloc_specs = AllocSpecs(alloc_f=alloc_f)
+    exit_criteria = ExitCriteria(sim_max=6)
+
+    workflow = Ensemble(
+        libE_specs=libE_specs,
+        sim_specs=sim_specs,
+        alloc_specs=alloc_specs,
+        gen_specs=gen_specs,
+        exit_criteria=exit_criteria,
+    )
+
+    H, _, _ = workflow.run()
+
+    # Perform the run
+    if workflow.is_manager:
+        workflow.save_output(__file__)
+        print(f"Completed {len(H)} simulations")
diff --git a/libensemble/tests/regression_tests/test_optimas_ax_multitask.py b/libensemble/tests/regression_tests/test_optimas_ax_multitask.py
new file mode 100644
index 000000000..d942fcadc
--- /dev/null
+++ b/libensemble/tests/regression_tests/test_optimas_ax_multitask.py
@@ -0,0 +1,95 @@
+"""
+Tests libEnsemble with Optimas Multitask Ax Generator
+
+*****currently fixing nworkers to batch_size*****
+
+Execute via one of the following commands (e.g. 4 workers):
+    mpiexec -np 5 python test_optimas_ax_multitask.py
+    python test_optimas_ax_multitask.py -n 4
+
+When running with the above commands, the number of concurrent evaluations of
+the objective function will be 4 as the generator is on the manager.
+
+"""
+
+# Do not change these lines - they are parsed by run-tests.sh
+# TESTSUITE_COMMS: mpi local
+# TESTSUITE_NPROCS: 4
+# TESTSUITE_EXTRA: true
+
+import numpy as np
+from gest_api.vocs import VOCS
+
+from optimas.core import Task
+from optimas.generators import AxMultitaskGenerator
+
+from libensemble import Ensemble
+from libensemble.alloc_funcs.start_only_persistent import only_persistent_gens as alloc_f
+from libensemble.specs import AllocSpecs, ExitCriteria, GenSpecs, LibeSpecs, SimSpecs
+
+
+def eval_func_multitask(input_params):
+    """Evaluation function for task1 or task2 in multitask test"""
+    print(f'input_params: {input_params}')
+    x0 = input_params["x0"]
+    x1 = input_params["x1"]
+    trial_type = input_params["trial_type"]
+
+    if trial_type == "task_1":
+        result = -(x0 + 10 * np.cos(x0)) * (x1 + 5 * np.cos(x1))
+    else:
+        result = -0.5 * (x0 + 10 * np.cos(x0)) * (x1 + 5 * np.cos(x1))
+
+    output_params = {"f": result}
+    return output_params
+
+
+# Main block is necessary only when using local comms with spawn start method (default on macOS and Windows).
+if __name__ == "__main__": + + n = 2 + batch_size = 2 + + libE_specs = LibeSpecs(gen_on_manager=True, nworkers=batch_size) + + vocs = VOCS( + variables={ + "x0": [-50.0, 5.0], + "x1": [-5.0, 15.0], + "trial_type": {"task_1", "task_2"}, + }, + objectives={"f": "MAXIMIZE"}, + ) + + task1 = Task("task_1", n_init=2, n_opt=1) + task2 = Task("task_2", n_init=5, n_opt=3) + gen = AxMultitaskGenerator(vocs=vocs, hifi_task=task1, lofi_task=task2) + + gen_specs = GenSpecs( + generator=gen, + batch_size=batch_size, + vocs=vocs, + ) + + sim_specs = SimSpecs( + simulator=eval_func_multitask, + vocs=vocs, + ) + + alloc_specs = AllocSpecs(alloc_f=alloc_f) + exit_criteria = ExitCriteria(sim_max=15) + + workflow = Ensemble( + libE_specs=libE_specs, + sim_specs=sim_specs, + alloc_specs=alloc_specs, + gen_specs=gen_specs, + exit_criteria=exit_criteria, + ) + + H, _, _ = workflow.run() + + # Perform the run + if workflow.is_manager: + workflow.save_output(__file__) + print(f"Completed {len(H)} simulations") diff --git a/libensemble/tests/regression_tests/test_optimas_ax_sf.py b/libensemble/tests/regression_tests/test_optimas_ax_sf.py new file mode 100644 index 000000000..de7ec99dc --- /dev/null +++ b/libensemble/tests/regression_tests/test_optimas_ax_sf.py @@ -0,0 +1,85 @@ +""" +Tests libEnsemble with Optimas Single-Fidelity Ax Generator + +*****currently fixing nworkers to batch_size***** + +Execute via one of the following commands (e.g. 4 workers): + mpiexec -np 5 python test_optimas_ax_sf.py + python test_optimas_ax_sf.py -n 4 + +When running with the above commands, the number of concurrent evaluations of +the objective function will be 4 as the generator is on the manager. + +""" + +# Do not change these lines - they are parsed by run-tests.sh +# TESTSUITE_COMMS: mpi local +# TESTSUITE_NPROCS: 4 +# TESTSUITE_EXTRA: true + +import numpy as np +from gest_api.vocs import VOCS + +from optimas.core import Task +from optimas.generators import AxSingleFidelityGenerator + +from libensemble import Ensemble +from libensemble.alloc_funcs.start_only_persistent import only_persistent_gens as alloc_f +from libensemble.specs import AllocSpecs, ExitCriteria, GenSpecs, LibeSpecs, SimSpecs + + +def eval_func_sf(input_params): + """Evaluation function for single-fidelity test. """ + + x0 = input_params["x0"] + x1 = input_params["x1"] + result = -(x0 + 10 * np.cos(x0)) * (x1 + 5 * np.cos(x1)) + return {"f": result} + + +# Main block is necessary only when using local comms with spawn start method (default on macOS and Windows). 
+if __name__ == "__main__": + + n = 2 + batch_size = 2 + + libE_specs = LibeSpecs(gen_on_manager=True, nworkers=batch_size) + + vocs = VOCS( + variables={ + "x0": [-50.0, 5.0], + "x1": [-5.0, 15.0], + }, + objectives={"f": "MAXIMIZE"}, + ) + + gen = AxSingleFidelityGenerator(vocs=vocs) + + gen_specs = GenSpecs( + generator=gen, + batch_size=batch_size, + vocs=vocs, + ) + + sim_specs = SimSpecs( + simulator=eval_func_sf, + vocs=vocs, + ) + + alloc_specs = AllocSpecs(alloc_f=alloc_f) + exit_criteria = ExitCriteria(sim_max=10) + + workflow = Ensemble( + libE_specs=libE_specs, + sim_specs=sim_specs, + alloc_specs=alloc_specs, + gen_specs=gen_specs, + exit_criteria=exit_criteria, + ) + + H, _, _ = workflow.run() + + # Perform the run + if workflow.is_manager: + workflow.save_output(__file__) + print(f"Completed {len(H)} simulations") From b4dbc7d76fcfa3f474863aa07dc87fc2587486ea Mon Sep 17 00:00:00 2001 From: shudson Date: Thu, 11 Dec 2025 20:59:21 -0600 Subject: [PATCH 06/11] Remove old optimas test --- .../tests/regression_tests/test_optimas_ax.py | 104 ------------------ 1 file changed, 104 deletions(-) delete mode 100644 libensemble/tests/regression_tests/test_optimas_ax.py diff --git a/libensemble/tests/regression_tests/test_optimas_ax.py b/libensemble/tests/regression_tests/test_optimas_ax.py deleted file mode 100644 index 66530604a..000000000 --- a/libensemble/tests/regression_tests/test_optimas_ax.py +++ /dev/null @@ -1,104 +0,0 @@ -""" -Tests libEnsemble with Optimas Ax Generators - -*****currently fixing nworkers to batch_size***** - -Execute via one of the following commands (e.g. 4 workers): - mpiexec -np 5 python test_optimas_ax.py - python test_optimas_ax.py -n 4 - -When running with the above commands, the number of concurrent evaluations of -the objective function will be 4 as the generator is on the manager. - -""" - -# Do not change these lines - they are parsed by run-tests.sh -# TESTSUITE_COMMS: mpi local -# TESTSUITE_NPROCS: 4 -# TESTSUITE_EXTRA: true - -import numpy as np -from gest_api.vocs import VOCS - -from optimas.core import Task -from optimas.generators import ( - # AxSingleFidelityGenerator, - # AxMultiFidelityGenerator, - AxMultitaskGenerator, -) - -from libensemble import Ensemble -from libensemble.alloc_funcs.start_only_persistent import only_persistent_gens as alloc_f -from libensemble.specs import AllocSpecs, ExitCriteria, GenSpecs, LibeSpecs, SimSpecs - - -def eval_func_multitask(input_params): - """Evaluation function for task1 or task2 in multitask test""" - print(f'input_params: {input_params}') - x0 = input_params["x0"] - x1 = input_params["x1"] - trial_type = input_params["trial_type"] - - if trial_type == "task_1": - result = -(x0 + 10 * np.cos(x0)) * (x1 + 5 * np.cos(x1)) - else: - result = -0.5 * (x0 + 10 * np.cos(x0)) * (x1 + 5 * np.cos(x1)) - - output_params = {} - output_params["f"] = result - return output_params - - -# Main block is necessary only when using local comms with spawn start method (default on macOS and Windows). 
-if __name__ == "__main__": - - n = 2 - batch_size = 2 - - libE_specs = LibeSpecs(gen_on_manager=True, nworkers=batch_size) - - vocs = VOCS( - variables={ - "x0": [-50.0, 5.0], - "x1": [-5.0, 15.0], - "trial_type": {"task_1", "task_2"}, - }, - objectives={"f": "MAXIMIZE"}, - ) - - # gen = AxSingleFidelityGenerator(vocs=vocs) - # gen = AxMultiFidelityGenerator(vocs=vocs) - - task1 = Task("task_1", n_init=2, n_opt=1) - task2 = Task("task_2", n_init=5, n_opt=3) - gen = AxMultitaskGenerator(vocs=vocs, hifi_task=task1, lofi_task=task2) - - gen_specs = GenSpecs( - generator=gen, - # init_batch_size=5, # fist want to see why doesn't work though - batch_size=batch_size, - vocs=vocs, - ) - - sim_specs = SimSpecs( - simulator=eval_func_multitask, - vocs=vocs, - ) - - alloc_specs = AllocSpecs(alloc_f=alloc_f) - exit_criteria = ExitCriteria(sim_max=15) - - workflow = Ensemble( - libE_specs=libE_specs, - sim_specs=sim_specs, - alloc_specs=alloc_specs, - gen_specs=gen_specs, - exit_criteria=exit_criteria, - ) - - H, _, _ = workflow.run() - - # Perform the run - if workflow.is_manager: - workflow.save_output(__file__) - print(f"Completed {len(H)} simulations") From eeabf3f999e85704ef5c7c15121b785d786225be Mon Sep 17 00:00:00 2001 From: shudson Date: Fri, 12 Dec 2025 10:09:41 -0600 Subject: [PATCH 07/11] Make _id to sim_id more robust --- libensemble/utils/misc.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/libensemble/utils/misc.py b/libensemble/utils/misc.py index 74be3a5da..865bcdc94 100644 --- a/libensemble/utils/misc.py +++ b/libensemble/utils/misc.py @@ -130,12 +130,12 @@ def list_dicts_to_np(list_dicts: list, dtype: list = None, mapping: dict = {}) - if not isinstance(list_dicts, list): # presumably already a numpy array, conversion not necessary return list_dicts - if "sim_id" not in mapping: - mapping["sim_id"] = ["_id"] - # first entry is used to determine dtype first = list_dicts[0] + if "_id" in first and "sim_id" not in mapping: + mapping["sim_id"] = ["_id"] + # build a presumptive dtype new_dtype_names = _get_new_dtype_fields(first, mapping) combinable_names = _get_combinable_multidim_names(first, new_dtype_names) # [['x0', 'x1'], ['z']] @@ -166,6 +166,7 @@ def list_dicts_to_np(list_dicts: list, dtype: list = None, mapping: dict = {}) - out[output_name][j] = _pack_field(input_dict, input_names) else: out[output_name][j] = _pack_field(input_dict, mapping[output_name]) + return out From 5de05738dd9859b3ea03dfcf575bd0e18bb4b40e Mon Sep 17 00:00:00 2001 From: shudson Date: Fri, 12 Dec 2025 10:39:27 -0600 Subject: [PATCH 08/11] Handle empty dictionary --- libensemble/utils/misc.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/libensemble/utils/misc.py b/libensemble/utils/misc.py index 865bcdc94..3d94ff7d7 100644 --- a/libensemble/utils/misc.py +++ b/libensemble/utils/misc.py @@ -124,12 +124,16 @@ def _pack_field(input_dict: dict, field_names: list) -> tuple: def list_dicts_to_np(list_dicts: list, dtype: list = None, mapping: dict = {}) -> npt.NDArray: + """Convert list of dicts to numpy structured array""" if list_dicts is None: return None - if not isinstance(list_dicts, list): # presumably already a numpy array, conversion not necessary + if not isinstance(list_dicts, list): return list_dicts + if not list_dicts: + return np.array([], dtype=dtype if dtype else []) + # first entry is used to determine dtype first = list_dicts[0] @@ -142,7 +146,7 @@ def list_dicts_to_np(list_dicts: list, dtype: list = None, mapping: dict = {}) - if ( 
dtype is None - ): # rather roundabout. I believe default value gets set upon function instantiation. (default is mutable!) + ): # Default value gets set upon function instantiation (default is mutable). dtype = [] # build dtype of non-mapped fields. appending onto empty dtype @@ -155,7 +159,7 @@ def list_dicts_to_np(list_dicts: list, dtype: list = None, mapping: dict = {}) - for name in mapping: if name not in existing_names: size = len(mapping[name]) - dtype.append(_decide_dtype(name, 0.0, size)) # float + dtype.append(_decide_dtype(name, 0.0, size)) # default to float out = np.zeros(len(list_dicts), dtype=dtype) @@ -221,6 +225,7 @@ def unmap_numpy_array(array: npt.NDArray, mapping: dict = {}) -> npt.NDArray: def np_to_list_dicts(array: npt.NDArray, mapping: dict = {}) -> List[dict]: + """Convert numpy structured array to list of dicts""" if array is None: return None out = [] From 04a05650f6a7033ea73b2a5191d10f1b3bc65a65 Mon Sep 17 00:00:00 2001 From: shudson Date: Fri, 12 Dec 2025 10:41:44 -0600 Subject: [PATCH 09/11] Formatting tests --- libensemble/tests/regression_tests/test_optimas_ax_mf.py | 3 +-- libensemble/tests/regression_tests/test_optimas_ax_sf.py | 4 +--- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/libensemble/tests/regression_tests/test_optimas_ax_mf.py b/libensemble/tests/regression_tests/test_optimas_ax_mf.py index d46776403..b6f43b3ed 100644 --- a/libensemble/tests/regression_tests/test_optimas_ax_mf.py +++ b/libensemble/tests/regression_tests/test_optimas_ax_mf.py @@ -18,9 +18,8 @@ # TESTSUITE_EXTRA: true import numpy as np -from gest_api.vocs import VOCS -from optimas.core import Task +from gest_api.vocs import VOCS from optimas.generators import AxMultiFidelityGenerator from libensemble import Ensemble diff --git a/libensemble/tests/regression_tests/test_optimas_ax_sf.py b/libensemble/tests/regression_tests/test_optimas_ax_sf.py index de7ec99dc..ba0b66c29 100644 --- a/libensemble/tests/regression_tests/test_optimas_ax_sf.py +++ b/libensemble/tests/regression_tests/test_optimas_ax_sf.py @@ -18,9 +18,8 @@ # TESTSUITE_EXTRA: true import numpy as np -from gest_api.vocs import VOCS -from optimas.core import Task +from gest_api.vocs import VOCS from optimas.generators import AxSingleFidelityGenerator from libensemble import Ensemble @@ -30,7 +29,6 @@ def eval_func_sf(input_params): """Evaluation function for single-fidelity test. 
""" - x0 = input_params["x0"] x1 = input_params["x1"] result = -(x0 + 10 * np.cos(x0)) * (x1 + 5 * np.cos(x1)) From 7697ef7748a72bd5a88f17efa4ae7d0da0b225ca Mon Sep 17 00:00:00 2001 From: shudson Date: Tue, 16 Dec 2025 16:49:11 -0600 Subject: [PATCH 10/11] Ingest initial H0 for gest-api generators --- libensemble/specs.py | 5 +++++ libensemble/utils/runners.py | 16 +++++++++++----- 2 files changed, 16 insertions(+), 5 deletions(-) diff --git a/libensemble/specs.py b/libensemble/specs.py index dac1baae4..7d9ec92ae 100644 --- a/libensemble/specs.py +++ b/libensemble/specs.py @@ -247,6 +247,11 @@ def set_fields_from_vocs(self): persis_in_fields.extend(list(obj.keys())) self.persis_in = persis_in_fields + # Set inputs: same as persis_in for gest-api generators (needed for H0 ingestion) + if not self.inputs and self.generator is not None: + self.inputs = self.persis_in + print(f"inputs: {self.inputs}") + # Set outputs: variables + constants (what the generator produces) if not self.outputs: out_fields = [] diff --git a/libensemble/utils/runners.py b/libensemble/utils/runners.py index b0c78a7bc..0d96b099b 100644 --- a/libensemble/utils/runners.py +++ b/libensemble/utils/runners.py @@ -121,6 +121,9 @@ def _get_points_updates(self, batch_size: int) -> (npt.NDArray, npt.NDArray): def _convert_ingest(self, x: npt.NDArray) -> list: self.gen.ingest(np_to_list_dicts(x)) + def _convert_initial_ingest(self, x: npt.NDArray) -> list: + self.gen.ingest(np_to_list_dicts(x, mapping=getattr(self.gen, "variables_mapping", {}))) + def _loop_over_gen(self, tag, Work, H_in): """Interact with suggest/ingest generator that *does not* contain a background thread""" while tag not in [PERSIS_STOP, STOP_TAG]: @@ -139,12 +142,17 @@ def _get_initial_suggest(self, libE_info) -> npt.NDArray: def _start_generator_loop(self, tag, Work, H_in): """Start the generator loop after choosing best way of giving initial results to gen""" - self.gen.ingest(np_to_list_dicts(H_in, mapping=getattr(self.gen, "variables_mapping", {}))) + self._convert_initial_ingest(H_in) return self._loop_over_gen(tag, Work, H_in) def _persistent_result(self, calc_in, persis_info, libE_info): """Setup comms with manager, setup gen, loop gen to completion, return gen's results""" self.ps = PersistentSupport(libE_info, EVAL_GEN_TAG) + + # If H0 exists, ingest it into the generator before initial suggest + if calc_in is not None and len(calc_in) > 0: + self._convert_initial_ingest(calc_in) + # libE gens will hit the following line, but list_dicts_to_np will passthrough if the output is a numpy array H_out = list_dicts_to_np( self._get_initial_suggest(libE_info), @@ -182,10 +190,8 @@ def _get_points_updates(self, batch_size: int) -> (npt.NDArray, list): def _convert_ingest(self, x: npt.NDArray) -> list: self.gen.ingest_numpy(x) - def _start_generator_loop(self, tag, Work, H_in) -> npt.NDArray: - """Start the generator loop after choosing best way of giving initial results to gen""" - self.gen.ingest_numpy(H_in) - return self._loop_over_gen(tag, Work, H_in) # see parent class + def _convert_initial_ingest(self, x: npt.NDArray) -> list: + self.gen.ingest_numpy(x) class LibensembleGenThreadRunner(StandardGenRunner): From cd15f07d2855112c7535ecfaf59457fcc57dd521 Mon Sep 17 00:00:00 2001 From: shudson Date: Tue, 16 Dec 2025 17:05:15 -0600 Subject: [PATCH 11/11] Multitask test uses H0 --- .../test_optimas_ax_multitask.py | 56 +++++++++++-------- 1 file changed, 32 insertions(+), 24 deletions(-) diff --git 
a/libensemble/tests/regression_tests/test_optimas_ax_multitask.py b/libensemble/tests/regression_tests/test_optimas_ax_multitask.py index d942fcadc..bc419b2bd 100644 --- a/libensemble/tests/regression_tests/test_optimas_ax_multitask.py +++ b/libensemble/tests/regression_tests/test_optimas_ax_multitask.py @@ -1,6 +1,8 @@ """ Tests libEnsemble with Optimas Multitask Ax Generator +Runs an initial ensemble, followed by another using the first as an H0. + *****currently fixing nworkers to batch_size***** Execute via one of the following commands (e.g. 4 workers): @@ -61,16 +63,6 @@ def eval_func_multitask(input_params): objectives={"f": "MAXIMIZE"}, ) - task1 = Task("task_1", n_init=2, n_opt=1) - task2 = Task("task_2", n_init=5, n_opt=3) - gen = AxMultitaskGenerator(vocs=vocs, hifi_task=task1, lofi_task=task2) - - gen_specs = GenSpecs( - generator=gen, - batch_size=batch_size, - vocs=vocs, - ) - sim_specs = SimSpecs( simulator=eval_func_multitask, vocs=vocs, @@ -79,17 +71,33 @@ def eval_func_multitask(input_params): alloc_specs = AllocSpecs(alloc_f=alloc_f) exit_criteria = ExitCriteria(sim_max=15) - workflow = Ensemble( - libE_specs=libE_specs, - sim_specs=sim_specs, - alloc_specs=alloc_specs, - gen_specs=gen_specs, - exit_criteria=exit_criteria, - ) - - H, _, _ = workflow.run() - - # Perform the run - if workflow.is_manager: - workflow.save_output(__file__) - print(f"Completed {len(H)} simulations") + H0 = None + for run_num in range(2): + task1 = Task("task_1", n_init=2, n_opt=1) + task2 = Task("task_2", n_init=5, n_opt=3) + gen = AxMultitaskGenerator(vocs=vocs, hifi_task=task1, lofi_task=task2) + + gen_specs = GenSpecs( + generator=gen, + batch_size=batch_size, + vocs=vocs, + ) + + workflow = Ensemble( + libE_specs=libE_specs, + sim_specs=sim_specs, + alloc_specs=alloc_specs, + gen_specs=gen_specs, + exit_criteria=exit_criteria, + H0=H0, + ) + + H, _, _ = workflow.run() + + if run_num == 0: + H0 = H + + if workflow.is_manager: + if run_num == 1: + workflow.save_output("multitask_with_H0") + print(f"Second run completed: {len(H)} simulations")
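
A minimal standalone sketch of the "_id" to "sim_id" mapping that PATCH 01 and PATCH 07 wire into list_dicts_to_np: gest-api style generators label each trial dict with "_id", while libEnsemble's history update and persis_in handling expect a "sim_id" field. The helper below and the x0/x1 field names are illustrative assumptions, not libEnsemble API; the real conversion lives in libensemble/utils/misc.py.

# Illustration only (not part of the patches above): mirrors the effect of the
# mapping {"sim_id": ["_id"]} that list_dicts_to_np applies when the first
# generator dict contains "_id". The helper and field names are assumptions.
import numpy as np

def dicts_to_history(trials):
    """Pack gest-api style trial dicts into a structured array keyed by sim_id."""
    dtype = [("sim_id", int), ("x0", float), ("x1", float)]
    out = np.zeros(len(trials), dtype=dtype)
    for i, trial in enumerate(trials):
        out["sim_id"][i] = trial["_id"]  # the generator's "_id" becomes libEnsemble's "sim_id"
        out["x0"][i] = trial["x0"]
        out["x1"][i] = trial["x1"]
    return out

trials = [{"_id": 0, "x0": -1.2, "x1": 3.4}, {"_id": 1, "x0": 0.7, "x1": -2.1}]
H_out = dicts_to_history(trials)
assert H_out["sim_id"].tolist() == [0, 1]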