@@ -172,10 +172,78 @@ def run_swmm(constant_flows, efd_parameters=None,verbose=False):
172172
173173 return {"data_log" : env .data_log ,"peak_filling_degrees" :peak_filling_degrees }
174174
175+
# --- Shared evaluator for objective/constraint with caching ---
def evaluate_cf_point(params_array):
    """
    Run one SWMM simulation for *params_array* and derive both the
    objective (flow exceedance) and the constraint (flooding + drainage)
    from the same run, caching the pair in Sim_cf.computed_return_values
    so the expensive simulation is never repeated for the same point.

    Parameters
    ----------
    params_array : array-like
        Constant-flow parameters; flattened to 1-D before use.

    Returns
    -------
    dict
        {"objective": float, "constraint": float}
    """
    # Flatten exactly once and reuse it for both the cache key and the
    # simulation call (the original converted/flattened the array twice).
    flat_params = np.asarray(params_array).flatten()
    key = tuple(flat_params)
    cached = Sim_cf.computed_return_values.get(key)
    if cached is not None:
        return cached

    data = run_swmm(flat_params, None, verbose=False)

    # Nodes whose id contains '5' or '9' are excluded from every cost term.
    def _skip(node_id):
        return '5' in node_id or '9' in node_id

    # Objective: total flow above the 3.0 threshold across monitored nodes.
    objective_cost = float(sum(
        max(x - 3.0, 0.0)
        for node_id, series in data['data_log']['flow'].items()
        if not _skip(node_id)
        for x in series
    ))

    # Constraint part 1: total flooding.
    flood_cost = sum(
        sum(series)
        for node_id, series in data['data_log']['flooding'].items()
        if not _skip(node_id)
    )
    if 0.0 < flood_cost < 1.0:
        # Small positive flooding is clamped up to 1.0.
        flood_cost = 1.0
    elif flood_cost <= 0.0:
        # No flooding at all: fall back to the worst peak filling degree.
        flood_cost = float(max(data['peak_filling_degrees']))
    else:
        flood_cost = float(flood_cost)

    # Constraint part 2: penalize nodes that have not drained by the end
    # of the simulation (final depth above 0.10).
    drainage_cost = sum(
        10.0 * series[-1]
        for node_id, series in data['data_log']['depthN'].items()
        if not _skip(node_id) and series[-1] > 0.10
    )

    result = {
        "objective": objective_cost,
        "constraint": float(flood_cost + drainage_cost),
    }
    Sim_cf.computed_return_values[key] = result
    return result
226+
175227class Sim_cf :
176228 threshold = 0.99 # on the constraint function to define the feasible (safe) region
177229 computed_return_values = dict ()
178230
231+ @staticmethod
232+ def objective (input_data ):
233+ return_values = []
234+ for sample in input_data :
235+ res = evaluate_cf_point (sample .numpy ())
236+ return_values .append (res ["objective" ])
237+ return np .array (return_values ).reshape (- 1 , 1 )
238+
239+ @staticmethod
240+ def constraint (input_data ):
241+ return_values = []
242+ for sample in input_data :
243+ res = evaluate_cf_point (sample .numpy ())
244+ return_values .append (res ["constraint" ])
245+ return np .array (return_values ).reshape (- 1 , 1 )
246+ '''
179247 @staticmethod
180248 def objective(input_data):
181249 return_values = []
@@ -263,6 +331,7 @@ def constraint(input_data):
263331 return_values.append(flood_cost)
264332 return_values = np.array(return_values).reshape(-1,1)
265333 return return_values
334+ '''
266335
267336OBJECTIVE = "OBJECTIVE"
268337CONSTRAINT = "CONSTRAINT"
@@ -406,7 +475,7 @@ def observer_efd(query_points):
406475 }
407476
def create_bo_model(data):
    # Wrap a GP regression model over the search space; the likelihood
    # variance is pinned to a tiny value (1e-7) for near-noiseless fits.
    return GaussianProcessRegression(
        build_gpr(data, search_space, likelihood_variance=1e-7)
    )
411480
412481if evaluating == "constant-flow" and mode == "optimize" :
@@ -670,10 +739,17 @@ def combined_objective(params):
670739 function_call_count += 1
671740
672741 params_array = np .array (params ).flatten ()
673-
742+ res = evaluate_cf_point (params_array )
743+ # If the constraint is violated, return a large penalty
744+ if res ['constraint' ] > Sim_cf .threshold :
745+ return 1e6 * (res ['constraint' ] - Sim_cf .threshold ) # if all initial samples have the same value some methods will error out.
746+ return float (res ['objective' ])
747+
748+ '''
674749 # Use existing simulation infrastructure to evaluate
675750 data = run_swmm(params_array, None, verbose=False)
676751 return sum(data['data_log']['performance_measure'])
752+ '''
677753
678754 # For tracking performance across methods
679755 results = {
@@ -684,9 +760,9 @@ def combined_objective(params):
684760 }
685761
686762 # Generate common initial points for all methods
687- num_initial_points = 50 # Number of initial points
688- num_steps = 200
689- initial_seed = 7 # Use fixed seed for reproducibility
763+ num_initial_points = 5 #25 # Number of initial points
764+ num_steps = 5 #25
765+ initial_seed = 42 # Use fixed seed for reproducibility
690766 tf .random .set_seed (initial_seed )
691767 np .random .seed (initial_seed )
692768
@@ -724,46 +800,46 @@ def combined_objective(params):
724800 datasets = opt_result .try_get_final_datasets ()
725801 obj_data = datasets [OBJECTIVE ]
726802 con_data = datasets [CONSTRAINT ]
803+ models = opt_result .try_get_final_models () # capture models (not used for 9D plotting)
727804 bouc_fcalls = len (obj_data .query_points )
728- # Process BOUC results and calculate pystorms costs
805+
806+ # Process BOUC results and compute combined costs for each point (no re-simulation)
807+ bouc_combined_costs = []
729808 for i in range (len (obj_data .query_points )):
730809 point = obj_data .query_points [i ].numpy ()
731-
732- # Calculate pystorms cost for this point
733- data = run_swmm (point , None , verbose = False )
734- pystorms_cost = sum (data ['data_log' ]['performance_measure' ])
735- bouc_pystorms_costs .append (pystorms_cost )
736-
737- # Record original BOUC objective value
810+
811+ combined_cost = combined_objective (point )
812+ bouc_combined_costs .append (combined_cost )
813+
814+ # Record original BOUC objective/constraint
738815 obj_value = float (obj_data .observations [i ][0 ])
739816 con_value = float (con_data .observations [i ][0 ])
740-
817+
741818 # Track best feasible point according to BOUC's objective
742819 if con_value <= Sim_cf .threshold and obj_value < best_feasible_obj :
743820 best_feasible_obj = obj_value
744821 best_feasible_point = point
745- best_feasible_idx = i # Store the index here for later reference
746-
822+ best_feasible_idx = i
823+
747824 bouc_costs .append (obj_value )
748825 bouc_fcalls_arr .append (i + 1 )
749826
750827 # Store results for comparison
751828 results ["BOUC" ]["costs" ] = bouc_costs
752- results ["BOUC" ]["pystorms_costs " ] = bouc_pystorms_costs
829+ results ["BOUC" ]["combined_costs " ] = bouc_combined_costs
753830 results ["BOUC" ]["fcalls" ] = bouc_fcalls_arr
754-
755- # Use the already calculated pystorms cost for the best point
831+
832+ # Use the combined cost for the best point
756833 if best_feasible_point is not None :
757834 results ["BOUC" ]["best_params" ] = best_feasible_point
758- results ["BOUC" ]["best_cost" ] = bouc_pystorms_costs [best_feasible_idx ]
835+ results ["BOUC" ]["best_cost" ] = bouc_combined_costs [best_feasible_idx ]
759836 else :
760- # If no feasible point, use the best objective point
761- idx = np .argmin (obj_data .observations )
837+ idx = int (np .argmin (bouc_combined_costs ))
762838 results ["BOUC" ]["best_params" ] = obj_data .query_points [idx ].numpy ()
763- results ["BOUC" ]["best_cost" ] = bouc_pystorms_costs [idx ]
839+ results ["BOUC" ]["best_cost" ] = bouc_combined_costs [idx ]
764840
765841 print ("BOUC function calls:" , bouc_fcalls_arr )
766- print ("BOUC pystorms costs:" , bouc_pystorms_costs )
842+ print ("BOUC combined costs:" , bouc_combined_costs )
767843
768844 # 2. Vanilla Bayesian Optimization using Trieste
769845 print ("\n Running vanilla BO optimization..." )
@@ -791,7 +867,7 @@ def observer_vanilla_bo(query_points):
791867 return Dataset (query_points , Sim_vanilla_bo .objective (query_points ))
792868
793869 def vanilla_bo_create_model (data ):
794- gpr = build_gpr (data , search_space )
870+ gpr = build_gpr (data , search_space , likelihood_variance = 1e-7 )
795871 return GaussianProcessRegression (gpr )
796872
797873 initial_data_bo = observer_vanilla_bo (initial_points )
@@ -940,8 +1016,8 @@ def de_callback(x, convergence):
9401016
9411017 # Compare results
9421018 print ("\n Optimization Results Comparison (using consistent performance measure):" )
943- print ("Starting costs (from pystorms performance measure ):" )
944- print (f" BOUC: { bouc_pystorms_costs [0 ]:.4f} , BO: { bo_costs [0 ]:.4f} , DA: { da_costs [0 ]:.4f} , DE: { de_costs [0 ]:.4f} " )
1019+ print ("Starting costs (combined objective ):" )
1020+ print (f" BOUC: { bouc_combined_costs [0 ]:.4f} , BO: { bo_costs [0 ]:.4f} , DA: { da_costs [0 ]:.4f} , DE: { de_costs [0 ]:.4f} " )
9451021 print ("-" * 80 )
9461022 print (f"{ 'Method' :<10} | { 'Best Cost' :<15} | { 'Function Calls' :<15} | { 'Time (s)' :<15} " )
9471023 print ("-" * 80 )
@@ -1021,7 +1097,7 @@ def rolling_min(costs, feasible=None):
10211097 return best
10221098
10231099 # For BOUC, use pystorms cost directly
1024- results ["BOUC" ]["best_cost_so_far" ] = rolling_min (results ["BOUC" ]["pystorms_costs " ])
1100+ results ["BOUC" ]["best_cost_so_far" ] = rolling_min (results ["BOUC" ]["combined_costs " ])
10251101
10261102 # For other methods, all are assumed feasible
10271103 for method in ["BO" , "DA" , "DE" ]:
0 commit comments