From b11e47d9ec904ef1d150b4047073c21b71966667 Mon Sep 17 00:00:00 2001 From: daniel Date: Tue, 26 Apr 2022 13:13:05 -0500 Subject: [PATCH 1/8] First pass, autosetting within canaryopt 'create' classmethod --- servo/connectors/kubernetes.py | 57 ++++++++++++++++++++++++++++++++-- servo/connectors/opsani_dev.py | 5 +-- 2 files changed, 58 insertions(+), 4 deletions(-) diff --git a/servo/connectors/kubernetes.py b/servo/connectors/kubernetes.py index 026ea1d04..fa26b9baf 100644 --- a/servo/connectors/kubernetes.py +++ b/servo/connectors/kubernetes.py @@ -3622,6 +3622,7 @@ async def create( name = container_config.alias or ( f"{deployment.name}/{container.name}" if container else deployment.name ) + return cls( name=name, deployment_config=config, @@ -3955,6 +3956,26 @@ async def create( main_container = await deployment_or_rollout.get_target_container( container_config ) + + # Autoset CPU and memory range based on resources + if not container_config.cpu: + cpu_resource = Core( + main_container.get_resource_requirements("cpu").get( + ResourceRequirement.request + ) + ) + cpu_autoset = autoset_resource_range("cpu", value=cpu_resource) + container_config.cpu = cpu_autoset + + if not container_config.memory: + memory_resource = ShortByteSize.validate( + main_container.get_resource_requirements("memory").get( + ResourceRequirement.request + ) + ) + memory_autoset = autoset_resource_range("memory", value=memory_resource) + container_config.memory = memory_autoset + name = ( deployment_or_rollout_config.strategy.alias if isinstance( @@ -4911,8 +4932,8 @@ class ContainerConfiguration(servo.BaseConfiguration): name: ContainerTagName alias: Optional[ContainerTagName] command: Optional[str] # TODO: create model... - cpu: CPU - memory: Memory + cpu: Optional[CPU] + memory: Optional[Memory] env: Optional[list[servo.PydanticEnvironmentSettingAnnotation]] = None static_environment_variables: Optional[Dict[str, str]] @@ -5771,3 +5792,35 @@ def set_container_resource_defaults_from_config( f"Setting resource requirements for '{resource}' to: {requirements}" ) container.set_resource_requirements(resource, requirements) + + +def autoset_resource_range( + resource_type: Resource, + value: Union[Core, ShortByteSize], +) -> Union[CPU, Memory]: + + min_multiplier = 4 + max_multiplier = 3 + + servo.logger.trace(f"Retrieved {resource_type} defined resource: {value}") + + resource_min = value / min_multiplier + resource_max = value * max_multiplier + + if resource_type == Resource.cpu: + + resource_autoset = CPU( + min=Core(resource_min), max=Core(resource_max), step="250m" + ) + + elif resource_type == Resource.memory: + + resource_autoset = Memory( + min=ShortByteSize.validate(str(resource_min)), + max=ShortByteSize.validate(str(resource_max)), + step="256 MiB", + ) + + servo.logger.info(f"Autosetting {resource_type} range to: {resource_autoset}") + + return resource_autoset diff --git a/servo/connectors/opsani_dev.py b/servo/connectors/opsani_dev.py index 26823c13d..eb4bb06a7 100644 --- a/servo/connectors/opsani_dev.py +++ b/servo/connectors/opsani_dev.py @@ -63,8 +63,8 @@ class OpsaniDevConfiguration(servo.BaseConfiguration): container: str service: str port: Optional[Union[pydantic.StrictInt, str]] = None - cpu: CPU - memory: Memory + cpu: Optional[CPU] + memory: Optional[Memory] env: Optional[list[servo.PydanticEnvironmentSettingAnnotation]] static_environment_variables: Optional[Dict[str, str]] prometheus_base_url: str = PROMETHEUS_SIDECAR_BASE_URL @@ -110,6 +110,7 @@ def generate_kubernetes_config( Returns: A 
Kubernetes connector configuration object. """ + strategy: Union[ servo.connectors.kubernetes.CanaryOptimizationStrategyConfiguration, servo.connectors.kubernetes.DefaultOptimizationStrategyConfiguration, From 712843871c09951cb9c695eb9317fae6b3220d85 Mon Sep 17 00:00:00 2001 From: Daniel Howell Date: Wed, 11 May 2022 12:47:06 -0500 Subject: [PATCH 2/8] Move autoset to kubernetes connector startup, add tests --- servo/connectors/kubernetes.py | 126 ++++++++++++++++------------ servo/connectors/opsani_dev.py | 114 ++++++++++++++----------- tests/connectors/opsani_dev_test.py | 27 +++++- 3 files changed, 163 insertions(+), 104 deletions(-) diff --git a/servo/connectors/kubernetes.py b/servo/connectors/kubernetes.py index b057e6a13..217c26760 100644 --- a/servo/connectors/kubernetes.py +++ b/servo/connectors/kubernetes.py @@ -3972,25 +3972,6 @@ async def create( container_config ) - # Autoset CPU and memory range based on resources - if not container_config.cpu: - cpu_resource = Core( - main_container.get_resource_requirements("cpu").get( - ResourceRequirement.request - ) - ) - cpu_autoset = autoset_resource_range("cpu", value=cpu_resource) - container_config.cpu = cpu_autoset - - if not container_config.memory: - memory_resource = ShortByteSize.validate( - main_container.get_resource_requirements("memory").get( - ResourceRequirement.request - ) - ) - memory_autoset = autoset_resource_range("memory", value=memory_resource) - container_config.memory = memory_autoset - name = ( deployment_or_rollout_config.strategy.alias if isinstance( @@ -5333,16 +5314,17 @@ async def _check_container_resource_requirements( for resource in Resource.values(): current_state = None container_requirements = container.get_resource_requirements(resource) - get_requirements = getattr(cont_config, resource).get - for requirement in get_requirements: - current_state = container_requirements.get(requirement) - if current_state: - break + if getattr(cont_config, resource) is not None: + get_requirements = getattr(cont_config, resource).get + for requirement in get_requirements: + current_state = container_requirements.get(requirement) + if current_state: + break - assert current_state, ( - f"{type(target_controller).__name__} {target_config.name} target container {cont_config.name} spec does not define the resource {resource}. " - f"At least one of the following must be specified: {', '.join(map(lambda req: req.resources_key, get_requirements))}" - ) + assert current_state, ( + f"{type(target_controller).__name__} {target_config.name} target container {cont_config.name} spec does not define the resource {resource}. 
" + f"At least one of the following must be specified: {', '.join(map(lambda req: req.resources_key, get_requirements))}" + ) @servo.multicheck( 'Containers in the "{item.name}" Deployment have resource requirements' @@ -5405,6 +5387,44 @@ async def check_rollout(rol_config: RolloutConfiguration) -> None: class KubernetesConnector(servo.BaseConnector): config: KubernetesConfiguration + @servo.on_event() + async def startup(self) -> None: + + # Autoset CPU and memory range based on resources + for deployment_or_rollout_config in (self.config.deployments or []) + ( + self.config.rollouts or [] + ): + read_args = ( + deployment_or_rollout_config.name, + cast(str, deployment_or_rollout_config.namespace), + ) + deployment_or_rollout = await Deployment.read(*read_args) + container_config = deployment_or_rollout_config.containers[0] + main_container = await deployment_or_rollout.get_target_container( + container_config + ) + + if not container_config.cpu: + cpu_resources = main_container.get_resource_requirements("cpu") + # Set requests = limits if not specified + if (cpu_request := cpu_resources[ResourceRequirement.request]) is None: + cpu_request = cpu_resources[ResourceRequirement.limit] + + cpu_resource = Core(cpu_request) + cpu_autoset = autoset_resource_range("cpu", value=cpu_resource) + container_config.cpu = cpu_autoset + + if not container_config.memory: + memory_resources = main_container.get_resource_requirements("memory") + if ( + memory_request := memory_resources[ResourceRequirement.request] + ) is None: + memory_request = memory_resources[ResourceRequirement.limit] + + memory_resource = ShortByteSize.validate(memory_request) + memory_autoset = autoset_resource_range("memory", value=memory_resource) + container_config.memory = memory_autoset + @servo.on_event() async def attach(self, servo_: servo.Servo) -> None: # Ensure we are ready to talk to Kubernetes API @@ -5783,30 +5803,32 @@ def set_container_resource_defaults_from_config( container: Container, config: ContainerConfiguration ) -> None: for resource in Resource.values(): - # NOTE: cpu/memory stanza in container config - resource_config = getattr(config, resource) - requirements = container.get_resource_requirements(resource) - servo.logger.debug( - f"Loaded resource requirements for '{resource}': {requirements}" - ) - for requirement in ResourceRequirement: - # Use the request/limit from the container.[cpu|memory].[request|limit] as default/override - if resource_value := getattr(resource_config, requirement.name): - if (existing_resource_value := requirements.get(requirement)) is None: - servo.logger.debug( - f"Setting default value for {resource}.{requirement} to: {resource_value}" - ) - else: - servo.logger.debug( - f"Overriding existing value for {resource}.{requirement} ({existing_resource_value}) to: {resource_value}" - ) + # NOTE: cpu/memory stanza in container config (if set) + if (resource_config := getattr(config, resource)) is not None: + requirements = container.get_resource_requirements(resource) + servo.logger.debug( + f"Loaded resource requirements for '{resource}': {requirements}" + ) + for requirement in ResourceRequirement: + # Use the request/limit from the container.[cpu|memory].[request|limit] as default/override + if resource_value := getattr(resource_config, requirement.name): + if ( + existing_resource_value := requirements.get(requirement) + ) is None: + servo.logger.debug( + f"Setting default value for {resource}.{requirement} to: {resource_value}" + ) + else: + servo.logger.debug( + f"Overriding 
existing value for {resource}.{requirement} ({existing_resource_value}) to: {resource_value}" + ) - requirements[requirement] = resource_value + requirements[requirement] = resource_value - servo.logger.debug( - f"Setting resource requirements for '{resource}' to: {requirements}" - ) - container.set_resource_requirements(resource, requirements) + servo.logger.debug( + f"Setting resource requirements for '{resource}' to: {requirements}" + ) + container.set_resource_requirements(resource, requirements) def autoset_resource_range( @@ -5825,7 +5847,7 @@ def autoset_resource_range( if resource_type == Resource.cpu: resource_autoset = CPU( - min=Core(resource_min), max=Core(resource_max), step="250m" + min=Core(resource_min), max=Core(resource_max), step="125m" ) elif resource_type == Resource.memory: @@ -5833,7 +5855,7 @@ def autoset_resource_range( resource_autoset = Memory( min=ShortByteSize.validate(str(resource_min)), max=ShortByteSize.validate(str(resource_max)), - step="256 MiB", + step="128 MiB", ) servo.logger.info(f"Autosetting {resource_type} range to: {resource_autoset}") diff --git a/servo/connectors/opsani_dev.py b/servo/connectors/opsani_dev.py index b8ffcaed1..062afb8b6 100644 --- a/servo/connectors/opsani_dev.py +++ b/servo/connectors/opsani_dev.py @@ -436,16 +436,18 @@ async def check_resource_requirements(self) -> None: for resource in servo.connectors.kubernetes.Resource.values(): current_state = None container_requirements = container.get_resource_requirements(resource) - get_requirements = getattr(self.config, resource).get - for requirement in get_requirements: - current_state = container_requirements.get(requirement) - if current_state: - break - assert current_state, ( - f"{self.controller_type_name} {self.config_controller_name} target container {self.config.container} spec does not define the resource {resource}. " - f"At least one of the following must be specified: {', '.join(map(lambda req: req.resources_key, get_requirements))}" - ) + if getattr(self.config, resource) is not None: + get_requirements = getattr(self.config, resource).get + for requirement in get_requirements: + current_state = container_requirements.get(requirement) + if current_state: + break + + assert current_state, ( + f"{self.controller_type_name} {self.config_controller_name} target container {self.config.container} spec does not define the resource {resource}. 
" + f"At least one of the following must be specified: {', '.join(map(lambda req: req.resources_key, get_requirements))}" + ) @servo.checks.require("Target container resources fall within optimization range") async def check_target_container_resources_within_limits(self) -> None: @@ -473,53 +475,63 @@ async def check_target_container_resources_within_limits(self) -> None: # Get resource requirements from container # TODO: This needs to reuse the logic from CanaryOptimization class (tuning_cpu, tuning_memory, etc properties) - cpu_resource_requirements = target_container.get_resource_requirements("cpu") - cpu_resource_value = cpu_resource_requirements.get( - next( - filter( - lambda r: cpu_resource_requirements[r] is not None, - self.config.cpu.get, - ), - None, + if self.config.cpu is not None: + cpu_resource_requirements = target_container.get_resource_requirements( + "cpu" + ) + cpu_resource_value = cpu_resource_requirements.get( + next( + filter( + lambda r: cpu_resource_requirements[r] is not None, + self.config.cpu.get, + ), + None, + ) + ) + container_cpu_value = servo.connectors.kubernetes.Core.parse( + cpu_resource_value ) - ) - container_cpu_value = servo.connectors.kubernetes.Core.parse(cpu_resource_value) - memory_resource_requirements = target_container.get_resource_requirements( - "memory" - ) - memory_resource_value = memory_resource_requirements.get( - next( - filter( - lambda r: memory_resource_requirements[r] is not None, - self.config.memory.get, - ), - None, + # Get config values + config_cpu_min = self.config.cpu.min + config_cpu_max = self.config.cpu.max + + # Check values against config. + assert ( + container_cpu_value >= config_cpu_min + ), f"target container CPU value {container_cpu_value.human_readable()} must be greater than optimizable minimum {config_cpu_min.human_readable()}" + assert ( + container_cpu_value <= config_cpu_max + ), f"target container CPU value {container_cpu_value.human_readable()} must be less than optimizable maximum {config_cpu_max.human_readable()}" + + if self.config.memory is not None: + memory_resource_requirements = target_container.get_resource_requirements( + "memory" + ) + memory_resource_value = memory_resource_requirements.get( + next( + filter( + lambda r: memory_resource_requirements[r] is not None, + self.config.memory.get, + ), + None, + ) + ) + container_memory_value = servo.connectors.kubernetes.ShortByteSize.validate( + memory_resource_value ) - ) - container_memory_value = servo.connectors.kubernetes.ShortByteSize.validate( - memory_resource_value - ) - # Get config values - config_cpu_min = self.config.cpu.min - config_cpu_max = self.config.cpu.max - config_memory_min = self.config.memory.min - config_memory_max = self.config.memory.max + # Get config values + config_memory_min = self.config.memory.min + config_memory_max = self.config.memory.max - # Check values against config. 
- assert ( - container_cpu_value >= config_cpu_min - ), f"target container CPU value {container_cpu_value.human_readable()} must be greater than optimizable minimum {config_cpu_min.human_readable()}" - assert ( - container_cpu_value <= config_cpu_max - ), f"target container CPU value {container_cpu_value.human_readable()} must be less than optimizable maximum {config_cpu_max.human_readable()}" - assert ( - container_memory_value >= config_memory_min - ), f"target container Memory value {container_memory_value.human_readable()} must be greater than optimizable minimum {config_memory_min.human_readable()}" - assert ( - container_memory_value <= config_memory_max - ), f"target container Memory value {container_memory_value.human_readable()} must be less than optimizable maximum {config_memory_max.human_readable()}" + # Check values against config. + assert ( + container_memory_value >= config_memory_min + ), f"target container Memory value {container_memory_value.human_readable()} must be greater than optimizable minimum {config_memory_min.human_readable()}" + assert ( + container_memory_value <= config_memory_max + ), f"target container Memory value {container_memory_value.human_readable()} must be less than optimizable maximum {config_memory_max.human_readable()}" @servo.require( '{self.controller_type_name} "{self.config_controller_name}" is ready' diff --git a/tests/connectors/opsani_dev_test.py b/tests/connectors/opsani_dev_test.py index 71e754028..c544160f2 100644 --- a/tests/connectors/opsani_dev_test.py +++ b/tests/connectors/opsani_dev_test.py @@ -50,6 +50,17 @@ def config(kube) -> servo.connectors.opsani_dev.OpsaniDevConfiguration: ) +@pytest.fixture +def no_resources_config(kube) -> servo.connectors.opsani_dev.OpsaniDevConfiguration: + return servo.connectors.opsani_dev.OpsaniDevConfiguration( + namespace=kube.namespace, + deployment="fiber-http", + container="fiber-http", + service="fiber-http", + __optimizer__=servo.configuration.Optimizer(id="test.com/foo", token="12345"), + ) + + @pytest.fixture def no_tuning_config(kube) -> servo.connectors.opsani_dev.OpsaniDevConfiguration: return servo.connectors.opsani_dev.OpsaniDevConfiguration( @@ -185,10 +196,24 @@ def test_generate_kubernetes_config(self) -> None: 0 ].static_environment_variables == {"FOO": "BAR", "BAZ": "1"} + def test_generate_no_resources_config(self) -> None: + no_resources_config = servo.connectors.opsani_dev.OpsaniDevConfiguration( + namespace="test", + deployment="fiber-http", + container="fiber-http", + service="fiber-http", + __optimizer__=servo.configuration.Optimizer( + id="test.com/foo", token="12345" + ), + ) + no_resources_k_config = no_resources_config.generate_kubernetes_config() + assert no_resources_k_config.CPU is None + assert no_resources_k_config.Memory is None + def test_generate_no_tuning_config(self) -> None: no_tuning_config = servo.connectors.opsani_dev.OpsaniDevConfiguration( namespace="test", - rollout="fiber-http", + deployment="fiber-http", container="fiber-http", service="fiber-http", cpu=servo.connectors.kubernetes.CPU(min="125m", max="4000m", step="125m"), From bca3101b62fa5d9d2a344f0ae09605bbc97986b1 Mon Sep 17 00:00:00 2001 From: daniel Date: Wed, 11 May 2022 13:19:58 -0500 Subject: [PATCH 3/8] Fix tests --- tests/connectors/opsani_dev_test.py | 4 ++-- tests/kubernetes_test.py | 30 +++++++++++++++++++++++++++++ 2 files changed, 32 insertions(+), 2 deletions(-) diff --git a/tests/connectors/opsani_dev_test.py b/tests/connectors/opsani_dev_test.py index c544160f2..4255e8eb3 100644 --- 
a/tests/connectors/opsani_dev_test.py +++ b/tests/connectors/opsani_dev_test.py @@ -207,8 +207,8 @@ def test_generate_no_resources_config(self) -> None: ), ) no_resources_k_config = no_resources_config.generate_kubernetes_config() - assert no_resources_k_config.CPU is None - assert no_resources_k_config.Memory is None + assert no_resources_k_config.deployments[0].containers[0].cpu is None + assert no_resources_k_config.deployments[0].containers[0].memory is None def test_generate_no_tuning_config(self) -> None: no_tuning_config = servo.connectors.opsani_dev.OpsaniDevConfiguration( diff --git a/tests/kubernetes_test.py b/tests/kubernetes_test.py index f82098ba0..750376716 100644 --- a/tests/kubernetes_test.py +++ b/tests/kubernetes_test.py @@ -732,3 +732,33 @@ def test_copying_cpu_with_invalid_value_does_not_raise() -> None: # Use copy + update to hydrate the value cpu_copy = cpu.copy(update={"value": "5"}) assert cpu_copy.value == "5" + + +@pytest.mark.parametrize( + "value, resource_type, expected_autoset", + [ + ( + servo.connectors.kubernetes.Core.parse("1"), + "cpu", + servo.connectors.kubernetes.CPU(min="250m", max="3000m", step="125m"), + ), + ( + servo.connectors.kubernetes.Core.parse("2"), + "cpu", + servo.connectors.kubernetes.CPU(min="500m", max="6000m", step="125m"), + ), + ( + servo.connectors.kubernetes.ShortByteSize.validate("2Gi"), + "memory", + servo.connectors.kubernetes.Memory( + min="512.0MiB", max="6.0GiB", step="128.0MiB" + ), + ), + ], +) +def test_autoset_resource_range(value, resource_type, expected_autoset): + + autoset_value = servo.connectors.kubernetes.autoset_resource_range( + resource_type=resource_type, value=value + ) + assert autoset_value == expected_autoset From b943c8c97e51476822c051c11234edc771935695 Mon Sep 17 00:00:00 2001 From: daniel Date: Wed, 11 May 2022 13:30:33 -0500 Subject: [PATCH 4/8] Formatting --- tests/kubernetes_test.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/kubernetes_test.py b/tests/kubernetes_test.py index 750376716..f7abdf8c2 100644 --- a/tests/kubernetes_test.py +++ b/tests/kubernetes_test.py @@ -743,9 +743,9 @@ def test_copying_cpu_with_invalid_value_does_not_raise() -> None: servo.connectors.kubernetes.CPU(min="250m", max="3000m", step="125m"), ), ( - servo.connectors.kubernetes.Core.parse("2"), - "cpu", - servo.connectors.kubernetes.CPU(min="500m", max="6000m", step="125m"), + servo.connectors.kubernetes.Core.parse("2"), + "cpu", + servo.connectors.kubernetes.CPU(min="500m", max="6000m", step="125m"), ), ( servo.connectors.kubernetes.ShortByteSize.validate("2Gi"), From b678c9643da7cc02f38bb8d92baff70420eee70a Mon Sep 17 00:00:00 2001 From: daniel Date: Thu, 12 May 2022 15:04:30 -0500 Subject: [PATCH 5/8] Add defaults when create_tuning_pod is False --- servo/connectors/opsani_dev.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/servo/connectors/opsani_dev.py b/servo/connectors/opsani_dev.py index 062afb8b6..5748202c8 100644 --- a/servo/connectors/opsani_dev.py +++ b/servo/connectors/opsani_dev.py @@ -116,6 +116,7 @@ def generate_kubernetes_config( ] = servo.connectors.kubernetes.DefaultOptimizationStrategyConfiguration() if self.create_tuning_pod: + strategy = ( servo.connectors.kubernetes.CanaryOptimizationStrategyConfiguration( type=servo.connectors.kubernetes.OptimizationStrategy.canary, @@ -130,6 +131,13 @@ def generate_kubernetes_config( else: # NOTE: currently assuming we NEVER want to adjust the main deployment with the opsani_dev connector # TODO: Do we ever need to 
support opsani dev bootstrapping of non-canary adjusted optimization of deployments? + + # Just load defaults when create_tuning_pod is False - these values aren't used and just hold the pinned setting + if not self.cpu: + self.cpu = (CPU(min="250m", max="3000m"),) + if not memory: + self.memory = (Memory(min="256 MiB", max="3.0 GiB"),) + self.cpu.pinned = True self.memory.pinned = True From 21596379e891b954ae282b2c9f3dddf3e15d2b4e Mon Sep 17 00:00:00 2001 From: daniel Date: Thu, 12 May 2022 15:14:04 -0500 Subject: [PATCH 6/8] Forgot self --- servo/connectors/opsani_dev.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/servo/connectors/opsani_dev.py b/servo/connectors/opsani_dev.py index 5748202c8..6891ba60e 100644 --- a/servo/connectors/opsani_dev.py +++ b/servo/connectors/opsani_dev.py @@ -135,7 +135,7 @@ def generate_kubernetes_config( # Just load defaults when create_tuning_pod is False - these values aren't used and just hold the pinned setting if not self.cpu: self.cpu = (CPU(min="250m", max="3000m"),) - if not memory: + if not self.memory: self.memory = (Memory(min="256 MiB", max="3.0 GiB"),) self.cpu.pinned = True From 4de556d447538f78c5d919e1eee76f8015e55c3b Mon Sep 17 00:00:00 2001 From: daniel Date: Thu, 12 May 2022 15:14:48 -0500 Subject: [PATCH 7/8] Fix weird black formatting --- servo/connectors/opsani_dev.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/servo/connectors/opsani_dev.py b/servo/connectors/opsani_dev.py index 6891ba60e..f968175e0 100644 --- a/servo/connectors/opsani_dev.py +++ b/servo/connectors/opsani_dev.py @@ -134,9 +134,9 @@ def generate_kubernetes_config( # Just load defaults when create_tuning_pod is False - these values aren't used and just hold the pinned setting if not self.cpu: - self.cpu = (CPU(min="250m", max="3000m"),) + self.cpu = CPU(min="250m", max="3000m") if not self.memory: - self.memory = (Memory(min="256 MiB", max="3.0 GiB"),) + self.memory = Memory(min="256 MiB", max="3.0 GiB") self.cpu.pinned = True self.memory.pinned = True From e508398649895cd851cbf4be70675389a71ca58b Mon Sep 17 00:00:00 2001 From: daniel Date: Thu, 26 May 2022 01:01:29 -0500 Subject: [PATCH 8/8] Add multiplier to config, fix parsing --- servo/connectors/kubernetes.py | 23 ++++++++++++++++------- servo/connectors/opsani_dev.py | 7 +++++++ 2 files changed, 23 insertions(+), 7 deletions(-) diff --git a/servo/connectors/kubernetes.py b/servo/connectors/kubernetes.py index 217c26760..56fa79e34 100644 --- a/servo/connectors/kubernetes.py +++ b/servo/connectors/kubernetes.py @@ -4929,7 +4929,9 @@ class ContainerConfiguration(servo.BaseConfiguration): alias: Optional[ContainerTagName] command: Optional[str] # TODO: create model... 
cpu: Optional[CPU] + cpu_autoset_multiplier: pydantic.conlist(float, min_items=2, max_items=2) memory: Optional[Memory] + memory_autoset_multiplier: pydantic.conlist(float, min_items=2, max_items=2) env: Optional[servo.EnvironmentSettingList] static_environment_variables: Optional[Dict[str, str]] @@ -5410,8 +5412,12 @@ async def startup(self) -> None: if (cpu_request := cpu_resources[ResourceRequirement.request]) is None: cpu_request = cpu_resources[ResourceRequirement.limit] - cpu_resource = Core(cpu_request) - cpu_autoset = autoset_resource_range("cpu", value=cpu_resource) + cpu_resource = Core.parse(cpu_request).__opsani_repr__() + cpu_autoset = autoset_resource_range( + "cpu", + value=cpu_resource, + multiplier=container_config.cpu_autoset_multiplier, + ) container_config.cpu = cpu_autoset if not container_config.memory: @@ -5422,7 +5428,11 @@ async def startup(self) -> None: memory_request = memory_resources[ResourceRequirement.limit] memory_resource = ShortByteSize.validate(memory_request) - memory_autoset = autoset_resource_range("memory", value=memory_resource) + memory_autoset = autoset_resource_range( + "memory", + value=memory_resource, + multiplier=container_config.memory_autoset_multiplier, + ) container_config.memory = memory_autoset @servo.on_event() @@ -5832,12 +5842,11 @@ def set_container_resource_defaults_from_config( def autoset_resource_range( - resource_type: Resource, - value: Union[Core, ShortByteSize], + resource_type: Resource, value: float, multiplier: list[float] ) -> Union[CPU, Memory]: - min_multiplier = 4 - max_multiplier = 3 + min_multiplier = multiplier[0] + max_multiplier = multiplier[1] servo.logger.trace(f"Retrieved {resource_type} defined resource: {value}") diff --git a/servo/connectors/opsani_dev.py b/servo/connectors/opsani_dev.py index f968175e0..3a223394c 100644 --- a/servo/connectors/opsani_dev.py +++ b/servo/connectors/opsani_dev.py @@ -64,7 +64,12 @@ class OpsaniDevConfiguration(servo.BaseConfiguration): service: str port: Optional[Union[pydantic.StrictInt, str]] = None cpu: Optional[CPU] + cpu_autoset_multiplier: pydantic.conlist(float, min_items=2, max_items=2) = [4, 3] memory: Optional[Memory] + memory_autoset_multiplier: pydantic.conlist(float, min_items=2, max_items=2) = [ + 4, + 3, + ] env: Optional[servo.EnvironmentSettingList] static_environment_variables: Optional[Dict[str, str]] prometheus_base_url: str = PROMETHEUS_SIDECAR_BASE_URL @@ -152,7 +157,9 @@ def generate_kubernetes_config( name=self.container, alias="main", cpu=self.cpu, + cpu_autoset_multiplier=self.cpu_autoset_multiplier, memory=self.memory, + memory_autoset_multiplier=self.memory_autoset_multiplier, static_environment_variables=self.static_environment_variables, env=self.env, )
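
Taken together, the series leaves the autoset behaviour as follows: at connector startup, any container whose cpu or memory stanza is omitted gets an optimization range derived from its existing resource request (falling back to the limit when no request is set), with the minimum obtained by dividing the observed value by the first multiplier and the maximum by multiplying it by the second, defaulting to [4, 3]. Below is a minimal, illustrative sketch of that arithmetic using plain floats in place of the connector's Core/ShortByteSize/CPU/Memory models; the function name is invented here and is not part of the connector.

# Simplified stand-in for servo's autoset_resource_range: plain numbers
# instead of Core / ShortByteSize inputs and CPU / Memory outputs.
def sketch_autoset_range(resource_type: str, value: float, multiplier=(4, 3)) -> dict:
    """Derive an optimization range from an observed request (or limit)."""
    min_divisor, max_factor = multiplier
    return {
        "min": value / min_divisor,
        "max": value * max_factor,
        "step": "125m" if resource_type == "cpu" else "128 MiB",
    }

# A 1-core request yields 0.25 to 3.0 cores (250m to 3000m) and a 2 GiB request
# yields 512 MiB to 6 GiB, matching the parametrized cases in tests/kubernetes_test.py.
print(sketch_autoset_range("cpu", 1.0))
print(sketch_autoset_range("memory", 2 * 1024**3))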
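
The multiplier pair introduced in PATCH 8/8 is typed as pydantic.conlist(float, min_items=2, max_items=2), so a configured value must contain exactly two floats; on OpsaniDevConfiguration it defaults to [4, 3]. A small, self-contained illustration of how that constraint validates under the pydantic v1 API the diff relies on (the model name below is invented for the example):

import pydantic

class AutosetMultipliers(pydantic.BaseModel):
    # Exactly two floats: [min divisor, max factor], defaulting to [4, 3].
    cpu_autoset_multiplier: pydantic.conlist(float, min_items=2, max_items=2) = [4, 3]

print(AutosetMultipliers())                                # uses the [4, 3] default
print(AutosetMultipliers(cpu_autoset_multiplier=[2, 5]))   # range becomes request/2 .. request*5
try:
    AutosetMultipliers(cpu_autoset_multiplier=[2])         # rejected: needs exactly two items
except pydantic.ValidationError as exc:
    print(exc)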
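
One subtlety behind PATCH 5/8 through 7/8: with cpu and memory now Optional on OpsaniDevConfiguration, the existing create_tuning_pod=False branch that sets self.cpu.pinned = True would hit a None field whenever those stanzas are omitted, so throwaway default ranges are back-filled purely to carry the pinned flag (the commit comment notes the values themselves are unused). A rough reconstruction of the failure mode being guarded against, using a hypothetical stand-in class rather than the real CPU setting model:

class FakeCPU:
    """Hypothetical stand-in for servo's CPU setting model."""
    def __init__(self, min, max):
        self.min, self.max, self.pinned = min, max, False

cpu = None                    # what an omitted cpu: stanza now deserializes to
try:
    cpu.pinned = True         # the pre-existing pinning code would fail here
except AttributeError as err:
    print(err)                # 'NoneType' object has no attribute 'pinned'

# PATCH 5/8 instead back-fills a default range first, then pins it:
cpu = cpu or FakeCPU(min="250m", max="3000m")
cpu.pinned = True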