Skip to content

Commit b4205d3

Browse files
committed
Cleans up, fixes some linting issues
1 parent 50a8e31 commit b4205d3

1 file changed

Lines changed: 31 additions & 32 deletions

File tree

climada/engine/option_appraisal/appraiser.py

Lines changed: 31 additions & 32 deletions
Original file line number | Diff line number | Diff line change
@@ -22,7 +22,7 @@
2222
import datetime
2323
import logging
2424
import warnings
25-
from typing import Iterable
25+
from typing import Iterable, cast
2626

2727
import matplotlib.dates as mdates
2828
import matplotlib.patches as mpatches
@@ -322,12 +322,14 @@ def __init__(
322322
interpolation_strategy: ImpactInterpolationStrategy | None = None,
323323
impact_computation_strategy: ImpactComputationStrategy | None = None,
324324
):
325-
"""Initialize a new `StaticRiskTrajectory`.
325+
"""Initialize a new `InterpolatedAppraiser`.
326326
327327
Parameters
328328
----------
329329
snapshot_list : list[Snapshot]
330330
The list of `Snapshot` object to compute risk from.
331+
measure_set : MeasureSet
332+
The set of adaptation measures to appraise.
331333
return_periods: list[int], optional
332334
The return periods to use when computing the `return_periods_metric`.
333335
Defaults to `DEFAULT_RP` ([20, 50, 100]).
@@ -336,9 +338,6 @@ def __init__(
336338
It must be a valid pandas string used to define periods,
337339
e.g., "Y" for years, "M" for months, "3M" for trimester, etc.
338340
Defaults to `DEFAULT_TIME_RESOLUTION` ("Y").
339-
all_groups_name: str, optional
340-
The string to use to define all exposure points subgroup.
341-
Defaults to `DEFAULT_ALLGROUP_NAME` ("All").
342341
risk_disc_rates: DiscRates, optional
343342
The discount rate to apply to future risk. Defaults to None.
344343
interpolation_strategy: InterpolationStrategyBase, optional
@@ -406,32 +405,30 @@ def _generic_metrics(
406405
) -> pd.DataFrame:
407406
LOGGER.debug("Computing base metric: %s.", metric_name)
408407
base_metrics = super()._generic_metrics(metric_name, metric_meth, **kwargs)
409-
if base_metrics is not None:
410-
LOGGER.debug("Computing averted risk for: %s.", metric_name)
411-
base_metrics = self._calc_averted(base_metrics)
412-
no_measures = base_metrics[
413-
base_metrics[MEASURE_COL_NAME] == NO_MEASURE_VALUE
414-
].copy()
415-
no_measures[REFERENCE_RISK_NAME] = no_measures[RISK_COL_NAME]
416-
no_measures[AVERTED_RISK_NAME] = 0.0
417-
no_measures[MEASURE_NET_COST_NAME] = 0.0
418-
LOGGER.debug("Computing cash flow for: %s.", metric_name)
419-
cash_flow_metrics = self.annual_cash_flows()
420-
LOGGER.debug("Merging with base metric: %s.", metric_name)
421-
base_metrics = base_metrics.merge(
422-
cash_flow_metrics[
423-
[DATE_COL_NAME, MEASURE_COL_NAME, MEASURE_NET_COST_NAME]
424-
],
425-
on=[MEASURE_COL_NAME, DATE_COL_NAME],
426-
)
427-
LOGGER.debug("Merging with no measure: %s.", metric_name)
428-
base_metrics = pd.concat([no_measures, base_metrics])
429408

430-
if measures is not None:
431-
base_metrics = base_metrics.loc[
432-
base_metrics[MEASURE_COL_NAME].isin(measures)
433-
].reset_index()
409+
LOGGER.debug("Computing averted risk for: %s.", metric_name)
410+
base_metrics = self._calc_averted(base_metrics)
411+
no_measures = base_metrics[
412+
base_metrics[MEASURE_COL_NAME] == NO_MEASURE_VALUE
413+
].copy()
414+
no_measures[REFERENCE_RISK_NAME] = no_measures[RISK_COL_NAME]
415+
no_measures[AVERTED_RISK_NAME] = 0.0
416+
no_measures[MEASURE_NET_COST_NAME] = 0.0
417+
LOGGER.debug("Computing cash flow for: %s.", metric_name)
418+
cash_flow_metrics = self.annual_cash_flows()
419+
LOGGER.debug("Merging with base metric: %s.", metric_name)
420+
base_metrics = base_metrics.merge(
421+
cash_flow_metrics[[DATE_COL_NAME, MEASURE_COL_NAME, MEASURE_NET_COST_NAME]],
422+
on=[MEASURE_COL_NAME, DATE_COL_NAME],
423+
)
424+
LOGGER.debug("Merging with no measure: %s.", metric_name)
425+
base_metrics = pd.concat([no_measures, base_metrics])
434426

427+
if measures is not None:
428+
col = cast(pd.Series, base_metrics[MEASURE_COL_NAME]) # For LSP
429+
base_metrics = base_metrics.loc[col.isin(measures)].reset_index()
430+
431+
base_metrics = cast(pd.DataFrame, base_metrics) # For LSP
435432
return base_metrics
436433

437434
@staticmethod
@@ -491,7 +488,9 @@ def per_date_CB(
491488
if not include_no_measure:
492489
metrics_df = metrics_df[metrics_df[MEASURE_COL_NAME] != NO_MEASURE_VALUE]
493490

494-
metrics_df.rename(columns={RISK_COL_NAME: RESIDUAL_RISK_NAME}, inplace=True)
491+
metrics_df.rename(columns={RISK_COL_NAME: RESIDUAL_RISK_NAME}, inplace=True) # type: ignore
492+
# (Dict[str,str] not accepted as a Mapping for some reason)
493+
495494
metrics_df["cumulated measure cost"] = metrics_df.groupby(
496495
[GROUP_COL_NAME, MEASURE_COL_NAME, METRIC_COL_NAME], observed=True
497496
)[MEASURE_NET_COST_NAME].cumsum()
@@ -511,7 +510,6 @@ def per_period_CB(
511510
RETURN_PERIOD_METRIC_NAME,
512511
AAI_PER_GROUP_METRIC_NAME,
513512
],
514-
npv: bool = True,
515513
include_no_measure=False,
516514
**kwargs,
517515
) -> pd.DataFrame | pd.Series:
@@ -562,6 +560,7 @@ def annual_cash_flows(self):
562560
DATE_COL_NAME: "first",
563561
}
564562
)
563+
df = cast(pd.DataFrame, df) # LSP
565564
df[MEASURE_COL_NAME] = meas_name
566565
res.append(df)
567566
df = pd.concat(res)
@@ -642,7 +641,7 @@ def plot_per_date_waterfall_CB(
642641
sharex=False,
643642
sharey=False,
644643
)
645-
self.plot_time_waterfall(ax=axs[0], start_date=start_date, end_date=end_date)
644+
self.plot_time_waterfall(ax=axs[0])
646645

647646
for i, measure in enumerate(measures):
648647
ax = axs[i + 1]

0 commit comments

Comments (0)