Skip to content

Commit ef28d95

Browse files
committed
refactor: update PLR ATE tuning coverage simulation with loss metrics and reduce repetitions
1 parent 899ff88 commit ef28d95

File tree

6 files changed

+19
-43
lines changed

6 files changed

+19
-43
lines changed

doc/plm/plr.qmd

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -233,7 +233,7 @@ df_tune_cov = pd.read_csv("../../results/plm/plr_ate_tune_coverage.csv", index_c
233233
assert df_tune_cov["repetition"].nunique() == 1
234234
n_rep_tune_cov = df_tune_cov["repetition"].unique()[0]
235235
236-
display_columns_tune_cov = ["Learner g", "Learner m", "Tuned", "Bias", "CI Length", "Coverage",]
236+
display_columns_tune_cov = ["Learner g", "Learner m", "Tuned", "Bias", "CI Length", "Coverage", "Loss g", "Loss m"]
237237
```
238238

239239

@@ -248,7 +248,7 @@ generate_and_show_styled_table(
248248
display_cols=display_columns_tune_cov,
249249
n_rep=n_rep_tune_cov,
250250
level_col="level",
251-
rename_map={"Learner g": "Learner l"},
251+
rename_map={"Learner g": "Learner l", "Loss g": "Loss l"},
252252
coverage_highlight_cols=["Coverage"]
253253
)
254254
```
@@ -262,7 +262,7 @@ generate_and_show_styled_table(
262262
display_cols=display_columns_tune_cov,
263263
n_rep=n_rep_tune_cov,
264264
level_col="level",
265-
rename_map={"Learner g": "Learner l"},
265+
rename_map={"Learner g": "Learner l", "Loss g": "Loss l"},
266266
coverage_highlight_cols=["Coverage"]
267267
)
268268
```

monte-cover/src/montecover/plm/plr_ate_tune.py

Lines changed: 8 additions & 32 deletions
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,7 @@
66

77
from montecover.base import BaseSimulation
88
from montecover.utils import create_learner_from_config
9+
from montecover.utils_tuning import lgbm_reg_params
910

1011

1112
class PLRATETuningCoverageSimulation(BaseSimulation):
@@ -29,40 +30,10 @@ def __init__(
2930
self._calculate_oracle_values()
3031

3132
# tuning specific settings
32-
# parameter space for the outcome regression tuning
33-
def ml_l_params(trial):
34-
return {
35-
"n_estimators": trial.suggest_int("n_estimators", 100, 500, step=50),
36-
"learning_rate": trial.suggest_float(
37-
"learning_rate", 1e-3, 0.1, log=True
38-
),
39-
"min_child_samples": trial.suggest_int(
40-
"min_child_samples", 20, 100, step=5
41-
),
42-
"max_depth": trial.suggest_int("max_depth", 3, 10, step=1),
43-
"lambda_l1": trial.suggest_float("lambda_l1", 1e-8, 10.0, log=True),
44-
"lambda_l2": trial.suggest_float("lambda_l2", 1e-8, 10.0, log=True),
45-
}
46-
47-
# parameter space for the propensity score tuning
48-
def ml_m_params(trial):
49-
return {
50-
"n_estimators": trial.suggest_int("n_estimators", 100, 500, step=50),
51-
"learning_rate": trial.suggest_float(
52-
"learning_rate", 1e-3, 0.1, log=True
53-
),
54-
"min_child_samples": trial.suggest_int(
55-
"min_child_samples", 20, 100, step=5
56-
),
57-
"max_depth": trial.suggest_int("max_depth", 3, 10, step=1),
58-
"lambda_l1": trial.suggest_float("lambda_l1", 1e-8, 10.0, log=True),
59-
"lambda_l2": trial.suggest_float("lambda_l2", 1e-8, 10.0, log=True),
60-
}
61-
62-
self._param_space = {"ml_l": ml_l_params, "ml_m": ml_m_params}
33+
self._param_space = {"ml_l": lgbm_reg_params, "ml_m": lgbm_reg_params}
6334

6435
self._optuna_settings = {
65-
"n_trials": 500,
36+
"n_trials": 200,
6637
"show_progress_bar": False,
6738
"verbosity": optuna.logging.WARNING, # Suppress Optuna logs
6839
}
@@ -121,6 +92,7 @@ def run_single_rep(self, dml_data, dml_params) -> Dict[str, Any]:
12192
"coverage": [],
12293
}
12394
for model in [dml_model, dml_model_tuned]:
95+
nuisance_loss = model.nuisance_loss
12496
for level in self.confidence_parameters["level"]:
12597
level_result = dict()
12698
level_result["coverage"] = self._compute_coverage(
@@ -139,6 +111,8 @@ def run_single_rep(self, dml_data, dml_params) -> Dict[str, Any]:
139111
"Score": score,
140112
"level": level,
141113
"Tuned": model is dml_model_tuned,
114+
"Loss g": nuisance_loss["ml_l"].mean() if score == "partialling out" else nuisance_loss["ml_g"].mean(),
115+
"Loss m": nuisance_loss["ml_m"].mean(),
142116
}
143117
)
144118
for key, res in level_result.items():
@@ -156,6 +130,8 @@ def summarize_results(self):
156130
"Coverage": "mean",
157131
"CI Length": "mean",
158132
"Bias": "mean",
133+
"Loss g": "mean",
134+
"Loss m": "mean",
159135
"repetition": "count",
160136
}
161137

results/plm/plr_ate_tune_config.yml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
simulation_parameters:
2-
repetitions: 500
2+
repetitions: 200
33
max_runtime: 19800
44
random_seed: 42
55
n_jobs: -2
Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
1-
Learner g,Learner m,Score,level,Tuned,Coverage,CI Length,Bias,repetition
2-
LGBM Regr.,LGBM Regr.,partialling out,0.9,False,0.81,0.1479047026981892,0.04545068026009308,500
3-
LGBM Regr.,LGBM Regr.,partialling out,0.9,True,0.894,0.1451364370890545,0.037424408565466506,500
4-
LGBM Regr.,LGBM Regr.,partialling out,0.95,False,0.898,0.17623932347696217,0.04545068026009308,500
5-
LGBM Regr.,LGBM Regr.,partialling out,0.95,True,0.942,0.1729407315508218,0.037424408565466506,500
1+
Learner g,Learner m,Score,level,Tuned,Coverage,CI Length,Bias,Loss g,Loss m,repetition
2+
LGBM Regr.,LGBM Regr.,partialling out,0.9,False,0.805,0.14733663739723357,0.04709716445165978,1.2426113404810248,1.1115356809360686,200
3+
LGBM Regr.,LGBM Regr.,partialling out,0.9,True,0.845,0.14434980329449199,0.03915064046361367,1.1704317749756923,1.06174632188426,200
4+
LGBM Regr.,LGBM Regr.,partialling out,0.95,False,0.87,0.17556243192108348,0.04709716445165978,1.2426113404810248,1.1115356809360686,200
5+
LGBM Regr.,LGBM Regr.,partialling out,0.95,True,0.895,0.17200339957118416,0.03915064046361367,1.1704317749756923,1.06174632188426,200
Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,2 +1,2 @@
11
DoubleML Version,Script,Date,Total Runtime (minutes),Python Version,Config File
2-
0.12.dev0,PLRATETuningCoverageSimulation,2025-11-24 15:37,119.77829658587774,3.12.9,scripts/plm/plr_ate_tune_config.yml
2+
0.12.dev0,PLRATETuningCoverageSimulation,2025-12-01 13:43,18.97071567773819,3.12.9,scripts/plm/plr_ate_tune_config.yml

scripts/plm/plr_ate_tune_config.yml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
# Simulation parameters for PLR ATE Coverage
22

33
simulation_parameters:
4-
repetitions: 500
4+
repetitions: 200
55
max_runtime: 19800 # 5.5 hours in seconds
66
random_seed: 42
77
n_jobs: -2

0 commit comments

Comments (0)