Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions docs/CONFIGURATION.md
Original file line number Diff line number Diff line change
Expand Up @@ -113,6 +113,7 @@ spec:
mp_instance_type: r6a.2xlarge
root_disk_size: 200
routing_weight: "100" # For blue/green: 0-255
# force_node_group_upgrade: false # Force version updates even when PDBs block pod eviction
components:
traefik_forward_auth_version: "0.0.14"

Expand Down
3 changes: 3 additions & 0 deletions examples/workload/ptd.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -69,6 +69,9 @@ spec:
# Traffic routing weight (for blue/green deployments)
routing_weight: "100"

# Force node group version updates even when PDBs block pod eviction (default: false)
# force_node_group_upgrade: false

# Component versions
components:
traefik_forward_auth_version: "0.0.14"
Expand Down
1 change: 1 addition & 0 deletions python-pulumi/src/ptd/aws_workload.py
Original file line number Diff line number Diff line change
Expand Up @@ -258,6 +258,7 @@ class AWSWorkloadClusterConfig(ptd.WorkloadClusterConfig):
enable_efs_csi_driver: bool = False
efs_config: ptd.EFSConfig | None = None
karpenter_config: KarpenterConfig | None = None
force_node_group_upgrade: bool = False


@dataclasses.dataclass(frozen=True)
Expand Down
3 changes: 3 additions & 0 deletions python-pulumi/src/ptd/pulumi_resources/aws_eks_cluster.py
Original file line number Diff line number Diff line change
Expand Up @@ -528,6 +528,7 @@ def with_node_group(
taints: list[aws.eks.NodeGroupTaintArgs] | None = None,
depends_on: list[pulumi.Resource] | None = None,
*,
force_update_version: bool = False,
use_name: bool = False,
):
# TODO: what typing should we have for subnets? Consistency?
Expand All @@ -552,6 +553,7 @@ def with_node_group(
:param max_unavailable: Optional. The maximum number of unavailable nodes during an update. Default 1
:param taints: Optional. The Kubernetes taints to be applied to the nodes in the node group
:param depends_on: Optional. Resources that must be created before the node group (e.g., CNI)
:param force_update_version: Optional. Force version update even when PDBs block pod eviction. Default False
:param opts: Optional. Resource options.
:return: The AWSEKSCluster component resource
"""
Expand Down Expand Up @@ -601,6 +603,7 @@ def instance_type_check(t: str) -> None:
),
update_config=aws.eks.NodeGroupUpdateConfigArgs(max_unavailable=max_unavailable),
taints=taints,
force_update_version=force_update_version,
opts=pulumi.ResourceOptions(parent=self.eks, depends_on=depends_on),
)

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -248,6 +248,7 @@ def _create_node_group(
version=cluster_cfg.cluster_version,
taints=eks_taints,
depends_on=depends_on,
force_update_version=cluster_cfg.force_node_group_upgrade,
)

def _define_tigera_operator(
Expand Down
13 changes: 13 additions & 0 deletions python-pulumi/tests/test_workload_cluster_config.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@
import pytest

import ptd
import ptd.aws_workload


def test_workload_cluster_config_default_initialization():
Expand Down Expand Up @@ -308,3 +309,15 @@ def test_workload_cluster_config_custom_k8s_resources_in_workload():

assert workload_config.clusters["20250328"].custom_k8s_resources == ["storage", "common"]
assert workload_config.clusters["20250415"].custom_k8s_resources == ["monitoring"]


def test_aws_workload_cluster_config_force_node_group_upgrade_default():
    """An AWSWorkloadClusterConfig built with no arguments must leave force_node_group_upgrade off."""
    default_config = ptd.aws_workload.AWSWorkloadClusterConfig()
    # Identity check (`is False`) is deliberate: the field is a strict bool, not merely falsy.
    assert default_config.force_node_group_upgrade is False


def test_aws_workload_cluster_config_force_node_group_upgrade_enabled():
    """Passing force_node_group_upgrade=True must be preserved on the config instance."""
    enabled_config = ptd.aws_workload.AWSWorkloadClusterConfig(force_node_group_upgrade=True)
    # Identity check (`is True`) is deliberate: the field is a strict bool, not merely truthy.
    assert enabled_config.force_node_group_upgrade is True
Loading