Commit 2334050

add LightGBM parameter tuning functions for regression and classification
Parent: 3525aeb

File tree: 2 files changed, +28 -35 lines

monte-cover/src/montecover/irm/irm_ate_tune.py

Lines changed: 2 additions & 35 deletions
@@ -6,6 +6,7 @@
 from montecover.base import BaseSimulation
 from montecover.utils import create_learner_from_config
+from montecover.utils_tuning import lgbm_reg_params, lgbm_cls_params


 class IRMATETuningCoverageSimulation(BaseSimulation):
@@ -29,41 +30,7 @@ def __init__(
         self._calculate_oracle_values()

         # tuning specific settings
-        # parameter space for the outcome regression tuning
-        def ml_g_params(trial):
-            return {
-                "n_estimators": trial.suggest_int("n_estimators", 100, 500, step=50),
-                "learning_rate": trial.suggest_float(
-                    "learning_rate", 1e-3, 0.1, log=True
-                ),
-                "min_child_samples": trial.suggest_int(
-                    "min_child_samples", 10, 50, step=5
-                ),
-                "max_depth": 3,
-                "feature_fraction": trial.suggest_float("feature_fraction", 0.6, 1),
-                "bagging_fraction": trial.suggest_float("bagging_fraction", 0.6, 1),
-                "lambda_l1": trial.suggest_float("lambda_l1", 1e-8, 10.0, log=True),
-                "lambda_l2": trial.suggest_float("lambda_l2", 1e-8, 10.0, log=True),
-            }
-
-        # parameter space for the propensity score tuning
-        def ml_m_params(trial):
-            return {
-                "n_estimators": trial.suggest_int("n_estimators", 100, 500, step=50),
-                "learning_rate": trial.suggest_float(
-                    "learning_rate", 1e-3, 0.1, log=True
-                ),
-                "min_child_samples": trial.suggest_int(
-                    "min_child_samples", 10, 50, step=5
-                ),
-                "max_depth": 3,
-                "feature_fraction": trial.suggest_float("feature_fraction", 0.6, 1),
-                "bagging_fraction": trial.suggest_float("bagging_fraction", 0.6, 1),
-                "lambda_l1": trial.suggest_float("lambda_l1", 1e-8, 10.0, log=True),
-                "lambda_l2": trial.suggest_float("lambda_l2", 1e-8, 10.0, log=True),
-            }
-
-        self._param_space = {"ml_g": ml_g_params, "ml_m": ml_m_params}
+        self._param_space = {"ml_g": lgbm_reg_params, "ml_m": lgbm_cls_params}

         self._optuna_settings = {
             "n_trials": 500,
monte-cover/src/montecover/utils_tuning.py (new file)

Lines changed: 26 additions & 0 deletions
@@ -0,0 +1,26 @@
+def lgbm_reg_params(trial):
+    """Parameter space for LightGBM regression tuning."""
+    return {
+        "n_estimators": trial.suggest_int("n_estimators", 100, 500, step=50),
+        "learning_rate": trial.suggest_float("learning_rate", 1e-3, 0.1, log=True),
+        "min_child_samples": trial.suggest_int("min_child_samples", 10, 50, step=5),
+        "max_depth": 3,
+        "feature_fraction": trial.suggest_float("feature_fraction", 0.6, 1),
+        "bagging_fraction": trial.suggest_float("bagging_fraction", 0.6, 1),
+        "lambda_l1": trial.suggest_float("lambda_l1", 1e-8, 10.0, log=True),
+        "lambda_l2": trial.suggest_float("lambda_l2", 1e-8, 10.0, log=True),
+    }
+
+
+def lgbm_cls_params(trial):
+    """Parameter space for LightGBM classification tuning."""
+    return {
+        "n_estimators": trial.suggest_int("n_estimators", 100, 500, step=50),
+        "learning_rate": trial.suggest_float("learning_rate", 1e-3, 0.1, log=True),
+        "min_child_samples": trial.suggest_int("min_child_samples", 10, 50, step=5),
+        "max_depth": 3,
+        "feature_fraction": trial.suggest_float("feature_fraction", 0.6, 1),
+        "bagging_fraction": trial.suggest_float("bagging_fraction", 0.6, 1),
+        "lambda_l1": trial.suggest_float("lambda_l1", 1e-8, 10.0, log=True),
+        "lambda_l2": trial.suggest_float("lambda_l2", 1e-8, 10.0, log=True),
+    }
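Since both helpers are plain trial-to-dict callables, they can be reused in any Optuna objective. A self-contained usage sketch with synthetic data follows; the objective and data are illustrative, not from the repo.

# Illustrative reuse of lgbm_reg_params outside the simulation class.
import optuna
from lightgbm import LGBMRegressor
from sklearn.datasets import make_regression
from sklearn.model_selection import cross_val_score

from montecover.utils_tuning import lgbm_reg_params

X, y = make_regression(n_samples=500, n_features=10, noise=1.0, random_state=0)


def objective(trial):
    params = lgbm_reg_params(trial)  # one candidate configuration per trial
    model = LGBMRegressor(**params, verbose=-1)
    return cross_val_score(model, X, y, cv=5).mean()  # mean 5-fold R^2


study = optuna.create_study(direction="maximize")
study.optimize(objective, n_trials=20)  # the simulation itself uses n_trials=500
print(study.best_params)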
