diff --git a/appabuild/config/lca.py b/appabuild/config/lca.py
index 5a3b4b8..762f5e5 100644
--- a/appabuild/config/lca.py
+++ b/appabuild/config/lca.py
@@ -25,6 +25,10 @@ class Model(BaseModel):
     name: name of the yaml file corresponding to the impact model (do not include file extension).
     path: output folder for saving impact model.
     compile: if True, precompute the symbolic expressions needed by Appa Run and store them in the impact model.
+    activities_name_to_include: list of activity name regexes used as an
+        alternative to the include_in_tree activity flag. Any activity whose name
+        matches one of the regexes will be included in the impact model. Setting
+        this field disables the include_in_tree activity flag.
     metadata: information about the impact model, meant to help the user of it to better understand the LCA leading to the impact model.
     parameters: information about all free parameters needed by the functional unit of the impact model.
     """
@@ -33,6 +37,7 @@ class Model(BaseModel):
     path: Optional[str] = "."
     compile: bool
     metadata: Optional[ModelMetadata] = None
+    activities_name_to_include: Optional[List[str]] = None
     parameters: Optional[List[ImpactModelParam]] = []
 
     @field_validator("parameters", mode="before")
diff --git a/appabuild/database/databases.py b/appabuild/database/databases.py
index 0df0e76..58d1841 100644
--- a/appabuild/database/databases.py
+++ b/appabuild/database/databases.py
@@ -13,9 +13,9 @@
 import bw2data as bd
 import bw2io as bi
+import lca_algebraic as lcaa
 import yaml
 from apparun.parameters import ImpactModelParams
-from lca_algebraic import resetParams, setForeground
 from lxml.etree import XMLSyntaxError
 from pydantic_core import ValidationError
 
@@ -48,7 +48,7 @@ def execute_at_startup(self) -> None:
         already present in Brightway project.
         :return:
         """
-        resetParams(self.name)
+        return
 
     @abc.abstractmethod
     def import_in_project(self) -> None:
@@ -284,7 +284,7 @@ def find_activities_on_disk(self) -> None:
 
     def execute_at_startup(self):
         if self.name in bd.databases:
-            resetParams(self.name)
+            lcaa.resetParams()
             del bd.databases[self.name]
 
         self.find_activities_on_disk()
@@ -323,4 +323,4 @@ def import_in_project(self) -> None:
             activity.to_bw_format() for activity in self.context.activities
         ]
         bw_database.write(dict(to_write_activities))
-        setForeground(self.name)
+        lcaa.setForeground(self.name)
diff --git a/appabuild/database/user_database_elements.py b/appabuild/database/user_database_elements.py
index 2d76fed..b55b07e 100644
--- a/appabuild/database/user_database_elements.py
+++ b/appabuild/database/user_database_elements.py
@@ -399,7 +399,6 @@ def from_serialized_activity(
                     ]
                 )
                 > 0
-                and serialized_activity.include_in_tree
             ):
                 amount_of_copies = len(
                     [
diff --git a/appabuild/model/builder.py b/appabuild/model/builder.py
index f8e9158..f520c61 100644
--- a/appabuild/model/builder.py
+++ b/appabuild/model/builder.py
@@ -8,6 +8,7 @@
 import itertools
 import logging
 import os
+import re
 from typing import List, Optional, Set, Tuple
 
 import bw2data as bd
@@ -72,6 +73,7 @@ def __init__(
         functional_unit: str,
         methods: list[str],
         output_path: str,
+        activities_name_to_include: Optional[List[str]] = None,
         metadata: Optional[ModelMetadata] = ModelMetadata(),
         parameters: Optional[ImpactModelParams] = None,
     ):
@@ -84,6 +86,10 @@ def __init__(
         :param metadata: information about the LCA behind the impact model. Should
             contain, or link to all information necessary for the end user's proper
             understanding of the impact model.
+        :param activities_name_to_include: list of activity name regexes used as an
+            alternative to the include_in_tree activity flag. Any activity whose name
+            matches one of the regexes will be included in the impact model. Setting
+            this parameter disables the include_in_tree activity flag.
         :param parameters: an ImpactModelParam object will have to be created for each
             parameter used in all used datasets. See ImpactModelParam attributes to
             know required fields.
@@ -92,6 +98,7 @@
         self.functional_unit = functional_unit
         self.parameters = parameters
         self.methods = methods
+        self.activities_name_to_include = activities_name_to_include
         self.metadata = metadata
         self.output_path = output_path
         self.bw_user_database = bd.Database(self.user_database_name)
@@ -113,6 +120,7 @@ def from_yaml(lca_config_path: str) -> ImpactModelBuilder:
                 lca_config.model.path,
                 lca_config.model.name + ".yaml",
             ),
+            lca_config.model.activities_name_to_include,
             lca_config.model.metadata,
             ImpactModelParams.from_list(lca_config.model.parameters),
         )
@@ -122,7 +130,8 @@ def build_impact_model(
         self, foreground_database: Optional[ForegroundDatabase] = None
     ) -> ImpactModel:
         """
-        Build an Impact Model, the model is a represented as a tree with the functional unit as its root
+        Build an Impact Model; the model is represented as a tree with the functional
+        unit as its root.
         :param foreground_database: database containing the functional unit
         :return: built impact model.
         """
@@ -259,7 +268,17 @@ def build_tree_node(self, tree_node: ImpactTreeNode):
                 e = f"Found recursive activity: {sub_act.get('name')}"
                 logger.exception(e)
                 raise ForegroundDatabaseError(e)
-            if sub_act.get("include_in_tree"):
+            if self.activities_name_to_include is not None:
+                regexes = [
+                    re.compile(activity_name_to_include)
+                    for activity_name_to_include in self.activities_name_to_include
+                ]
+                sub_act_to_include = any(
+                    regex.match(sub_act.get("name")) for regex in regexes
+                )
+            else:
+                sub_act_to_include = sub_act.get("include_in_tree")
+            if sub_act_to_include:
                 amount = _getAmountOrFormula(exch)
                 child_tree_node = tree_node.new_child(
                     name=sub_act["name"],
diff --git a/samples/datasets/user_database/nvidia_ai_gpu/nvidia_gpu_die_manufacturing.yaml b/samples/datasets/user_database/nvidia_ai_gpu/nvidia_gpu_die_manufacturing.yaml
index f5a0715..8727552 100644
--- a/samples/datasets/user_database/nvidia_ai_gpu/nvidia_gpu_die_manufacturing.yaml
+++ b/samples/datasets/user_database/nvidia_ai_gpu/nvidia_gpu_die_manufacturing.yaml
@@ -6,6 +6,7 @@ amount: 1
 parameters:
 - cuda_core
 - architecture
+include_in_tree: True
 comment: "NVIDIA GPU die for Pascal and Maxwell architectures. Number of CUDA cores is used to estimate die area and technology node. Information for modelisation can be found on techpowerup.com (https://www.techpowerup.com/gpu-specs/nvidia-gp108.g808 for example). We assume dies are manufactured in Taiwan. We assume (arbitrarily) a defect density of ?"
 exchanges:
 - database: user_database
diff --git a/tests/data/cmd_build/nvidia_ai_gpu_chip_lca_conf_no_include_in_tree.yaml b/tests/data/cmd_build/nvidia_ai_gpu_chip_lca_conf_no_include_in_tree.yaml
new file mode 100644
index 0000000..abe3c36
--- /dev/null
+++ b/tests/data/cmd_build/nvidia_ai_gpu_chip_lca_conf_no_include_in_tree.yaml
@@ -0,0 +1,66 @@
+scope:
+  fu:
+    name: 'nvidia_ai_gpu_chip'
+    database: "user_database"
+  methods:
+    - "EFV3_CLIMATE_CHANGE"
+model:
+  path: "."
+ name: "nvidia_ai_gpu_chip" + compile: True + activities_name_to_include: + - "nvidia_ai_gpu_chip" + - ".*_manufacturing_?[0-9]*$" #the _?[0-9]* is important here as some activities downstream the switch in nvidia_gpu_die_manufacturing will be duplicated with a "_{number_of_duplicate}" suffix + metadata: + author: + name: Maxime PERALTA + organization: CEA + mail: maxime.peralta@cea.fr + reviewer: + name: Mathias TORCASO + organization: CEA + mail: + report: + link: https://appalca.github.io/ + description: "A mock example of Appa LCA's impact model corresponding to a fictive AI chip accelerator based on NVIDIA GPU." + date: 07/10/2025 + version: "1" + license: proprietary + appabuild_version: "0.3.6" + parameters: + - name: cuda_core + type: float + default: + architecture: + Maxwell: 1344 + Pascal: 1280 + min: 256 + max: 4096 + - name: architecture + type: enum + default: Maxwell + weights: + Maxwell: 1 + Pascal: 1 + - name: usage_location + type: enum + default: FR + weights: + FR: 1 + EU: 1 + - name: energy_per_inference + type: float + default: + architecture: #we model energy by inference using a model of the TDP function of the number of cuda cores. We suppose that TDP power corresponds to running inferences at 90fps. + Maxwell: "0.0878*cuda_core*1000/(90*3600)" + Pascal: "0.0679*cuda_core*1000/(90*3600)" + pm_perc: 0.2 + - name: lifespan + type: float + default: 2.0 + pm: 1 + - name: inference_per_day + type: float + default: "30*3600*8" #30fps 8 hours a day + min: 0 + max: 86400000 \ No newline at end of file diff --git a/tests/end_to_end/test_cmd_build.py b/tests/end_to_end/test_cmd_build.py index 4ee9d70..b658210 100644 --- a/tests/end_to_end/test_cmd_build.py +++ b/tests/end_to_end/test_cmd_build.py @@ -1,5 +1,5 @@ """ -Test that the command appabuild lca build works correctly with a simple example. +Test that the command appabuild lca build works correctly with a simple example using CLI. """ import os diff --git a/tests/end_to_end/test_python_build.py b/tests/end_to_end/test_python_build.py new file mode 100644 index 0000000..3f8ebd9 --- /dev/null +++ b/tests/end_to_end/test_python_build.py @@ -0,0 +1,73 @@ +""" +Test that the command appabuild lca build works correctly with a simple example using +Python API. 
+""" + +import os + +import pytest +import yaml +from apparun.impact_model import ImpactModel +from typer.testing import CliRunner + +from appabuild.cli.lca import build +from tests import DATA_DIR + +runner = CliRunner() + + +def test_build_with_include_in_tree(): + appaconf_file = os.path.join(DATA_DIR, "cmd_build", "appalca_conf_wo_ei.yaml") + conf_file = os.path.join(DATA_DIR, "cmd_build", "nvidia_ai_gpu_chip_lca_conf.yaml") + expected_file = os.path.join( + DATA_DIR, "cmd_build", "nvidia_ai_gpu_chip_expected.yaml" + ) + expected_scores_file = os.path.join(DATA_DIR, "cmd_build", "expected_scores.yaml") + build(appaconf_file, conf_file) + + # Check the generated impact model is the same as expected + with open(expected_file, "r") as stream: + expected = yaml.safe_load(stream) + + with open("nvidia_ai_gpu_chip.yaml", "r") as stream: + value = yaml.safe_load(stream) + + assert expected == value, "result file not the same as expected file " + + # Check that the generated impact model can be run by Appa Run + model = ImpactModel.from_yaml("nvidia_ai_gpu_chip.yaml") + scores = model.get_nodes_scores() + scores = { + score.name: score.lcia_scores.scores["EFV3_CLIMATE_CHANGE"][0] + for score in scores + } + with open(expected_scores_file, "r") as stream: + expected_scores = yaml.safe_load(stream) + + assert scores == pytest.approx(expected_scores) + os.remove("nvidia_ai_gpu_chip.yaml") + + +def test_build_wo_include_in_tree(): + appaconf_file = os.path.join(DATA_DIR, "cmd_build", "appalca_conf_wo_ei.yaml") + conf_file = os.path.join( + DATA_DIR, "cmd_build", "nvidia_ai_gpu_chip_lca_conf_no_include_in_tree.yaml" + ) + expected_scores_file = os.path.join(DATA_DIR, "cmd_build", "expected_scores.yaml") + build(appaconf_file, conf_file) + + model = ImpactModel.from_yaml("nvidia_ai_gpu_chip.yaml") + assert len(model.tree.unnested_descendants) == 11 + + # Check that the generated impact model can be run by Appa Run + scores = model.get_nodes_scores() + scores = { + score.name: score.lcia_scores.scores["EFV3_CLIMATE_CHANGE"][0] + for score in scores + } + with open(expected_scores_file, "r") as stream: + expected_scores = yaml.safe_load(stream) + + assert scores["nvidia_ai_gpu_chip"] == pytest.approx( + expected_scores["nvidia_ai_gpu_chip"] + )