From 303c8c1dd26b23f49c6b21b53d15f9b8460bf63d Mon Sep 17 00:00:00 2001 From: Mateusz Kwasniewski Date: Wed, 21 Jan 2026 09:44:59 +0100 Subject: [PATCH 01/14] feat: impact metrics --- UnleashClient/__init__.py | 10 ++ UnleashClient/impact_metrics.py | 84 ++++++++++ UnleashClient/periodic_tasks/send_metrics.py | 12 +- requirements.txt | 2 +- tests/unit_tests/test_impact_metrics.py | 168 +++++++++++++++++++ 5 files changed, 273 insertions(+), 3 deletions(-) create mode 100644 UnleashClient/impact_metrics.py create mode 100644 tests/unit_tests/test_impact_metrics.py diff --git a/UnleashClient/__init__.py b/UnleashClient/__init__.py index 096b7d2f..f720b623 100644 --- a/UnleashClient/__init__.py +++ b/UnleashClient/__init__.py @@ -41,6 +41,8 @@ UnleashEventType, UnleashReadyEvent, ) +from UnleashClient.impact_metrics import ImpactMetrics +from UnleashClient.impact_metrics import MetricFlagContext as MetricFlagContext from UnleashClient.periodic_tasks import ( aggregate_and_send_metrics, ) @@ -205,6 +207,9 @@ def __init__( self.fl_job: Job = None self.metric_job: Job = None self.engine = UnleashEngine() + self._impact_metrics = ImpactMetrics( + self.engine, self.unleash_app_name, self.unleash_environment + ) self.cache = cache or FileCache( self.unleash_app_name, directory=cache_directory @@ -273,6 +278,11 @@ def connection_id(self): def is_initialized(self): return self._run_state == _RunState.INITIALIZED + @property + def impact_metrics(self) -> ImpactMetrics: + """Access impact metrics functionality for recording custom metrics with flag context.""" + return self._impact_metrics + def initialize_client(self, fetch_toggles: bool = True) -> None: """ Initializes client and starts communication with central unleash server(s). diff --git a/UnleashClient/impact_metrics.py b/UnleashClient/impact_metrics.py new file mode 100644 index 00000000..babc3d02 --- /dev/null +++ b/UnleashClient/impact_metrics.py @@ -0,0 +1,84 @@ +from dataclasses import dataclass, field +from typing import Any, Dict, List, Optional + +from yggdrasil_engine.engine import UnleashEngine + + +@dataclass +class MetricFlagContext: + """Context for resolving feature flag values as metric labels.""" + + flag_names: List[str] = field(default_factory=list) + context: Dict[str, Any] = field(default_factory=dict) + + +class ImpactMetrics: + """ + Provides methods to define and record metrics (counters, gauges, histograms) + with optional feature flag context that gets resolved to labels. 
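+
+    Resolved labels include the client's appName and environment, plus one
+    label per flag in the context: the variant name when a variant is
+    assigned, otherwise "enabled" or "disabled".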
+ """ + + def __init__(self, engine: UnleashEngine, app_name: str, environment: str): + self._engine = engine + self._app_name = app_name + self._environment = environment + + def define_counter(self, name: str, help_text: str) -> None: + self._engine.define_counter(name, help_text) + + def increment_counter( + self, + name: str, + value: int = 1, + flag_context: Optional[MetricFlagContext] = None, + ) -> None: + labels = self._resolve_labels(flag_context) + self._engine.inc_counter(name, value, labels) + + def define_gauge(self, name: str, help_text: str) -> None: + self._engine.define_gauge(name, help_text) + + def update_gauge( + self, + name: str, + value: int, + flag_context: Optional[MetricFlagContext] = None, + ) -> None: + labels = self._resolve_labels(flag_context) + self._engine.set_gauge(name, value, labels) + + def define_histogram( + self, name: str, help_text: str, buckets: Optional[List[float]] = None + ) -> None: + self._engine.define_histogram(name, help_text, buckets) + + def observe_histogram( + self, + name: str, + value: float, + flag_context: Optional[MetricFlagContext] = None, + ) -> None: + labels = self._resolve_labels(flag_context) + self._engine.observe_histogram(name, value, labels) + + def _resolve_labels( + self, flag_context: Optional[MetricFlagContext] + ) -> Optional[Dict[str, str]]: + if flag_context is None: + return None + + labels: Dict[str, str] = { + "appName": self._app_name, + "environment": self._environment, + } + + for flag_name in flag_context.flag_names: + variant = self._engine.get_variant(flag_name, flag_context.context) + if variant and variant.enabled: + labels[flag_name] = variant.name + elif variant and variant.feature_enabled: + labels[flag_name] = "enabled" + else: + labels[flag_name] = "disabled" + + return labels diff --git a/UnleashClient/periodic_tasks/send_metrics.py b/UnleashClient/periodic_tasks/send_metrics.py index 55fed20f..727a1367 100644 --- a/UnleashClient/periodic_tasks/send_metrics.py +++ b/UnleashClient/periodic_tasks/send_metrics.py @@ -19,6 +19,7 @@ def aggregate_and_send_metrics( engine: UnleashEngine, ) -> None: metrics_bucket = engine.get_metrics() + impact_metrics = engine.collect_impact_metrics() metrics_request = { "appName": app_name, @@ -31,7 +32,14 @@ def aggregate_and_send_metrics( "specVersion": CLIENT_SPEC_VERSION, } - if metrics_bucket: - send_metrics(url, metrics_request, headers, custom_options, request_timeout) + if impact_metrics: + metrics_request["impactMetrics"] = impact_metrics + + if metrics_bucket or impact_metrics: + success = send_metrics( + url, metrics_request, headers, custom_options, request_timeout + ) + if not success and impact_metrics: + engine.restore_impact_metrics(impact_metrics) else: LOGGER.debug("No feature flags with metrics, skipping metrics submission.") diff --git a/requirements.txt b/requirements.txt index 55041ecb..b85f09d2 100644 --- a/requirements.txt +++ b/requirements.txt @@ -6,7 +6,7 @@ mmhash3 python-dateutil requests semver -yggdrasil-engine>=1.0.0 +yggdrasil-engine>=1.1.0 launchdarkly-eventsource # Development packages diff --git a/tests/unit_tests/test_impact_metrics.py b/tests/unit_tests/test_impact_metrics.py new file mode 100644 index 00000000..8ca667b6 --- /dev/null +++ b/tests/unit_tests/test_impact_metrics.py @@ -0,0 +1,168 @@ +import json + +import pytest +import responses + +from tests.utilities.testing_constants import ( + APP_NAME, + ENVIRONMENT, + INSTANCE_ID, + URL, +) +from UnleashClient import INSTANCES, UnleashClient +from UnleashClient.cache import 
FileCache +from UnleashClient.constants import METRICS_URL +from UnleashClient.impact_metrics import MetricFlagContext +from UnleashClient.periodic_tasks import aggregate_and_send_metrics + +MOCK_FEATURES_RESPONSE = { + "version": 1, + "features": [ + { + "name": "feature-with-variant", + "enabled": True, + "strategies": [{"name": "default", "parameters": {}}], + "variants": [{"name": "treatment", "weight": 1000}], + }, + { + "name": "enabled-feature", + "enabled": True, + "strategies": [{"name": "default", "parameters": {}}], + }, + { + "name": "disabled-feature", + "enabled": False, + "strategies": [{"name": "default", "parameters": {}}], + }, + ], +} + + +@pytest.fixture(autouse=True) +def before_each(): + INSTANCES._reset() + + +@pytest.fixture +def unleash_client(): + cache = FileCache("TEST_CACHE") + cache.bootstrap_from_dict(MOCK_FEATURES_RESPONSE) + client = UnleashClient( + url=URL, + app_name=APP_NAME, + environment=ENVIRONMENT, + instance_id=INSTANCE_ID, + disable_metrics=True, + disable_registration=True, + cache=cache, + ) + client.initialize_client(fetch_toggles=False) + yield client + client.destroy() + + +class TestSendMetricsViaClient: + @responses.activate + def test_impact_metrics_with_labels_in_payload(self, unleash_client): + responses.add(responses.POST, URL + METRICS_URL, json={}, status=202) + + flag_context = MetricFlagContext( + flag_names=["feature-with-variant", "enabled-feature", "disabled-feature"], + context={"userId": "123"}, + ) + + unleash_client.impact_metrics.define_counter("purchases", "Number of purchases") + unleash_client.impact_metrics.increment_counter("purchases", 1, flag_context) + + unleash_client.impact_metrics.define_gauge("active_users", "Active users") + unleash_client.impact_metrics.update_gauge("active_users", 42, flag_context) + + unleash_client.impact_metrics.define_histogram("latency", "Request latency", [0.1, 0.5, 1.0]) + unleash_client.impact_metrics.observe_histogram("latency", 0.3, flag_context) + + aggregate_and_send_metrics( + url=URL, + app_name=APP_NAME, + instance_id=INSTANCE_ID, + connection_id="test-connection", + headers={}, + custom_options={}, + request_timeout=30, + engine=unleash_client.engine, + ) + + request_body = json.loads(responses.calls[0].request.body) + metrics = {m["name"]: m for m in request_body["impactMetrics"]} + + expected_labels = { + "appName": APP_NAME, + "environment": ENVIRONMENT, + "feature-with-variant": "treatment", + "enabled-feature": "enabled", + "disabled-feature": "disabled", + } + + assert metrics == { + "purchases": { + "name": "purchases", + "help": "Number of purchases", + "type": "counter", + "samples": [{"labels": expected_labels, "value": 1}], + }, + "active_users": { + "name": "active_users", + "help": "Active users", + "type": "gauge", + "samples": [{"labels": expected_labels, "value": 42}], + }, + "latency": { + "name": "latency", + "help": "Request latency", + "type": "histogram", + "samples": [ + { + "labels": expected_labels, + "count": 1, + "sum": 0.3, + "buckets": [ + {"le": 0.1, "count": 0}, + {"le": 0.5, "count": 1}, + {"le": 1.0, "count": 1}, + {"le": "+Inf", "count": 1}, + ], + } + ], + }, + } + + @responses.activate + def test_impact_metrics_resent_after_failure(self, unleash_client): + responses.add(responses.POST, URL + METRICS_URL, json={}, status=500) + responses.add(responses.POST, URL + METRICS_URL, json={}, status=202) + + unleash_client.impact_metrics.define_counter("my_counter", "Test counter") + unleash_client.impact_metrics.increment_counter("my_counter", 5) + + 
aggregate_and_send_metrics( + url=URL, + app_name=APP_NAME, + instance_id=INSTANCE_ID, + connection_id="test-connection", + headers={}, + custom_options={}, + request_timeout=30, + engine=unleash_client.engine, + ) + aggregate_and_send_metrics( + url=URL, + app_name=APP_NAME, + instance_id=INSTANCE_ID, + connection_id="test-connection", + headers={}, + custom_options={}, + request_timeout=30, + engine=unleash_client.engine, + ) + + request_body = json.loads(responses.calls[1].request.body) + assert request_body["impactMetrics"][0]["name"] == "my_counter" From 41adbe3c046ccc87e7dec0d18e17ab8b83473eb5 Mon Sep 17 00:00:00 2001 From: Mateusz Kwasniewski Date: Wed, 21 Jan 2026 09:56:58 +0100 Subject: [PATCH 02/14] feat: impact metrics --- UnleashClient/__init__.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/UnleashClient/__init__.py b/UnleashClient/__init__.py index f720b623..9c065e15 100644 --- a/UnleashClient/__init__.py +++ b/UnleashClient/__init__.py @@ -42,7 +42,6 @@ UnleashReadyEvent, ) from UnleashClient.impact_metrics import ImpactMetrics -from UnleashClient.impact_metrics import MetricFlagContext as MetricFlagContext from UnleashClient.periodic_tasks import ( aggregate_and_send_metrics, ) @@ -280,7 +279,6 @@ def is_initialized(self): @property def impact_metrics(self) -> ImpactMetrics: - """Access impact metrics functionality for recording custom metrics with flag context.""" return self._impact_metrics def initialize_client(self, fetch_toggles: bool = True) -> None: From 5730f0ce9e5b0635467363d703e6dd769e6aaa88 Mon Sep 17 00:00:00 2001 From: Mateusz Kwasniewski Date: Wed, 21 Jan 2026 10:09:09 +0100 Subject: [PATCH 03/14] feat: impact metrics --- README.md | 52 ++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 52 insertions(+) diff --git a/README.md b/README.md index 763770a4..f7adea25 100644 --- a/README.md +++ b/README.md @@ -319,6 +319,58 @@ client.is_enabled("testFlag") ``` +### Impact metrics + +Impact metrics are lightweight, application-level time-series metrics stored and visualized directly inside Unleash. They allow you to connect specific application data, such as request counts, error rates, or latency, to your feature flags and release plans. + +These metrics help validate feature impact and automate release processes. For instance, you can monitor usage patterns or performance to determine if a feature meets its goals. + +The SDK automatically attaches context labels to metrics: `appName` and `environment`. + +#### Counters + +Use counters for cumulative values that only increase (total requests, errors): + +```python +client.impact_metrics.define_counter( + "request_count", + "Total number of HTTP requests processed" +) + +client.impact_metrics.increment_counter("request_count") +``` + +#### Gauges + +Use gauges for fluctuating values like memory usage or active connections: + +```python +import psutil + +client.impact_metrics.define_gauge( + "memory_usage", + "Current memory usage in bytes" +) + +current_memory = psutil.Process().memory_info().rss +client.impact_metrics.update_gauge("memory_usage", current_memory) +``` + +#### Histograms + +Histograms measure value distribution (request duration, response size): + +```python +client.impact_metrics.define_histogram( + "request_time_ms", + "Time taken to process a request in milliseconds" +) + +client.impact_metrics.observe_histogram("request_time_ms", 125) +``` + +Impact metrics are batched and sent using the same interval as standard SDK metrics. 
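+
+#### Flag context labels
+
+To correlate a metric with feature flag state, pass a `MetricFlagContext` when
+recording a value. Each flag in `flag_names` is evaluated against the supplied
+Unleash context and attached as a label: the variant name if a variant is
+assigned, otherwise `enabled` or `disabled`. A minimal sketch, assuming a flag
+named `checkoutFlow` exists in your project:
+
+```python
+from UnleashClient.impact_metrics import MetricFlagContext
+
+flag_context = MetricFlagContext(
+    flag_names=["checkoutFlow"],
+    context={"userId": "some-user-id"},
+)
+
+client.impact_metrics.increment_counter("request_count", 1, flag_context)
+```
+
+The same `flag_context` argument is accepted by `update_gauge` and
+`observe_histogram`.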
+ ### Custom cache By default, the Python SDK stores feature flags in an on-disk cache using fcache. If you need a different storage backend, for example, Redis, memory-only, or a custom database, you can provide your own cache implementation. From 8b216087718684ac9789931af2cfd2a3887cb1f0 Mon Sep 17 00:00:00 2001 From: Mateusz Kwasniewski Date: Wed, 21 Jan 2026 10:15:04 +0100 Subject: [PATCH 04/14] feat: impact metrics --- tests/integration_tests/integration.py | 44 ++++++++++++++++++++++++-- 1 file changed, 42 insertions(+), 2 deletions(-) diff --git a/tests/integration_tests/integration.py b/tests/integration_tests/integration.py index 2513b8ac..e2c2c316 100644 --- a/tests/integration_tests/integration.py +++ b/tests/integration_tests/integration.py @@ -1,9 +1,11 @@ # --- import logging +import random import sys import time from UnleashClient import UnleashClient +from UnleashClient.impact_metrics import MetricFlagContext root = logging.getLogger() root.setLevel(logging.DEBUG) @@ -15,10 +17,48 @@ root.addHandler(handler) # --- -my_client = UnleashClient(url="http://localhost:4242/api", app_name="pyIvan") +my_client = UnleashClient( + url="https://sandbox.getunleash.io/enterprise/api/", + app_name="pyIvan", + environment="development", + custom_headers={ + "Authorization": "impact-metrics:development.6d70e55dd70dd79f5be3ce97835996000db94ed997c492421effe935" + }, +) my_client.initialize_client() +# Define impact metrics +my_client.impact_metrics.define_counter("purchases", "Number of purchases made") +my_client.impact_metrics.define_gauge("active_users", "Current number of active users") +my_client.impact_metrics.define_histogram( + "request_latency", "Request latency in seconds", [0.01, 0.05, 0.1, 0.5, 1.0, 5.0] +) + +# Track active users count +active_users = 100 + while True: time.sleep(10) - print(my_client.is_enabled("Demo")) + print(f"Demo enabled: {my_client.is_enabled('Demo')}") + + # Create flag context for metrics (ties metrics to feature flag state) + flag_context = MetricFlagContext( + flag_names=["Demo"], + context={"userId": "integration-test-user"}, + ) + + # Increment counter (simulate a purchase) + my_client.impact_metrics.increment_counter("purchases", 1, flag_context) + print("Incremented purchases counter") + + # Update gauge (simulate active users changing) + active_users += random.randint(-10, 15) + active_users = max(0, active_users) # Don't go negative + my_client.impact_metrics.update_gauge("active_users", active_users, flag_context) + print(f"Updated active_users gauge to {active_users}") + + # Observe histogram (simulate request latency) + latency = random.uniform(0.01, 2.0) + my_client.impact_metrics.observe_histogram("request_latency", latency, flag_context) + print(f"Observed request_latency: {latency:.3f}s") From a637b071bc32f261c888c62b95523b9b58923873 Mon Sep 17 00:00:00 2001 From: Mateusz Kwasniewski Date: Wed, 21 Jan 2026 10:19:24 +0100 Subject: [PATCH 05/14] feat: impact metrics --- README.md | 3 +- tests/integration_tests/integration.py | 44 ++------------------------ 2 files changed, 4 insertions(+), 43 deletions(-) diff --git a/README.md b/README.md index f7adea25..45e588c4 100644 --- a/README.md +++ b/README.md @@ -363,7 +363,8 @@ Histograms measure value distribution (request duration, response size): ```python client.impact_metrics.define_histogram( "request_time_ms", - "Time taken to process a request in milliseconds" + "Time taken to process a request in milliseconds", + [50, 100, 200, 500, 1000] ) 
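+# Each bucket counts observations less than or equal to its bound; an
+# implicit "+Inf" bucket captures everything above the largest bound.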
client.impact_metrics.observe_histogram("request_time_ms", 125) diff --git a/tests/integration_tests/integration.py b/tests/integration_tests/integration.py index e2c2c316..2513b8ac 100644 --- a/tests/integration_tests/integration.py +++ b/tests/integration_tests/integration.py @@ -1,11 +1,9 @@ # --- import logging -import random import sys import time from UnleashClient import UnleashClient -from UnleashClient.impact_metrics import MetricFlagContext root = logging.getLogger() root.setLevel(logging.DEBUG) @@ -17,48 +15,10 @@ root.addHandler(handler) # --- -my_client = UnleashClient( - url="https://sandbox.getunleash.io/enterprise/api/", - app_name="pyIvan", - environment="development", - custom_headers={ - "Authorization": "impact-metrics:development.6d70e55dd70dd79f5be3ce97835996000db94ed997c492421effe935" - }, -) +my_client = UnleashClient(url="http://localhost:4242/api", app_name="pyIvan") my_client.initialize_client() -# Define impact metrics -my_client.impact_metrics.define_counter("purchases", "Number of purchases made") -my_client.impact_metrics.define_gauge("active_users", "Current number of active users") -my_client.impact_metrics.define_histogram( - "request_latency", "Request latency in seconds", [0.01, 0.05, 0.1, 0.5, 1.0, 5.0] -) - -# Track active users count -active_users = 100 - while True: time.sleep(10) - print(f"Demo enabled: {my_client.is_enabled('Demo')}") - - # Create flag context for metrics (ties metrics to feature flag state) - flag_context = MetricFlagContext( - flag_names=["Demo"], - context={"userId": "integration-test-user"}, - ) - - # Increment counter (simulate a purchase) - my_client.impact_metrics.increment_counter("purchases", 1, flag_context) - print("Incremented purchases counter") - - # Update gauge (simulate active users changing) - active_users += random.randint(-10, 15) - active_users = max(0, active_users) # Don't go negative - my_client.impact_metrics.update_gauge("active_users", active_users, flag_context) - print(f"Updated active_users gauge to {active_users}") - - # Observe histogram (simulate request latency) - latency = random.uniform(0.01, 2.0) - my_client.impact_metrics.observe_histogram("request_latency", latency, flag_context) - print(f"Observed request_latency: {latency:.3f}s") + print(my_client.is_enabled("Demo")) From a9fa478cf85e0a920a6202a6ccfbbc13664c8062 Mon Sep 17 00:00:00 2001 From: Mateusz Kwasniewski Date: Wed, 21 Jan 2026 10:39:18 +0100 Subject: [PATCH 06/14] feat: impact metrics --- UnleashClient/impact_metrics.py | 10 ++--- tests/unit_tests/test_impact_metrics.py | 59 ++++++++++++++++++------- 2 files changed, 47 insertions(+), 22 deletions(-) diff --git a/UnleashClient/impact_metrics.py b/UnleashClient/impact_metrics.py index babc3d02..67213af6 100644 --- a/UnleashClient/impact_metrics.py +++ b/UnleashClient/impact_metrics.py @@ -41,7 +41,7 @@ def define_gauge(self, name: str, help_text: str) -> None: def update_gauge( self, name: str, - value: int, + value: float, flag_context: Optional[MetricFlagContext] = None, ) -> None: labels = self._resolve_labels(flag_context) @@ -63,15 +63,15 @@ def observe_histogram( def _resolve_labels( self, flag_context: Optional[MetricFlagContext] - ) -> Optional[Dict[str, str]]: - if flag_context is None: - return None - + ) -> Dict[str, str]: labels: Dict[str, str] = { "appName": self._app_name, "environment": self._environment, } + if flag_context is None: + return labels + for flag_name in flag_context.flag_names: variant = self._engine.get_variant(flag_name, flag_context.context) if variant 
and variant.enabled: diff --git a/tests/unit_tests/test_impact_metrics.py b/tests/unit_tests/test_impact_metrics.py index 8ca667b6..8a8065dc 100644 --- a/tests/unit_tests/test_impact_metrics.py +++ b/tests/unit_tests/test_impact_metrics.py @@ -63,22 +63,19 @@ def unleash_client(): class TestSendMetricsViaClient: @responses.activate - def test_impact_metrics_with_labels_in_payload(self, unleash_client): + def test_impact_metrics_in_payload(self, unleash_client): responses.add(responses.POST, URL + METRICS_URL, json={}, status=202) - flag_context = MetricFlagContext( - flag_names=["feature-with-variant", "enabled-feature", "disabled-feature"], - context={"userId": "123"}, - ) - unleash_client.impact_metrics.define_counter("purchases", "Number of purchases") - unleash_client.impact_metrics.increment_counter("purchases", 1, flag_context) + unleash_client.impact_metrics.increment_counter("purchases", 1) unleash_client.impact_metrics.define_gauge("active_users", "Active users") - unleash_client.impact_metrics.update_gauge("active_users", 42, flag_context) + unleash_client.impact_metrics.update_gauge("active_users", 42) - unleash_client.impact_metrics.define_histogram("latency", "Request latency", [0.1, 0.5, 1.0]) - unleash_client.impact_metrics.observe_histogram("latency", 0.3, flag_context) + unleash_client.impact_metrics.define_histogram( + "latency", "Request latency", [0.1, 0.5, 1.0] + ) + unleash_client.impact_metrics.observe_histogram("latency", 0.3) aggregate_and_send_metrics( url=URL, @@ -94,13 +91,7 @@ def test_impact_metrics_with_labels_in_payload(self, unleash_client): request_body = json.loads(responses.calls[0].request.body) metrics = {m["name"]: m for m in request_body["impactMetrics"]} - expected_labels = { - "appName": APP_NAME, - "environment": ENVIRONMENT, - "feature-with-variant": "treatment", - "enabled-feature": "enabled", - "disabled-feature": "disabled", - } + expected_labels = {"appName": APP_NAME, "environment": ENVIRONMENT} assert metrics == { "purchases": { @@ -135,6 +126,40 @@ def test_impact_metrics_with_labels_in_payload(self, unleash_client): }, } + @responses.activate + def test_impact_metrics_with_flag_context(self, unleash_client): + responses.add(responses.POST, URL + METRICS_URL, json={}, status=202) + + flag_context = MetricFlagContext( + flag_names=["feature-with-variant", "enabled-feature", "disabled-feature"], + context={"userId": "123"}, + ) + + unleash_client.impact_metrics.define_counter("purchases", "Number of purchases") + unleash_client.impact_metrics.increment_counter("purchases", 1, flag_context) + + aggregate_and_send_metrics( + url=URL, + app_name=APP_NAME, + instance_id=INSTANCE_ID, + connection_id="test-connection", + headers={}, + custom_options={}, + request_timeout=30, + engine=unleash_client.engine, + ) + + request_body = json.loads(responses.calls[0].request.body) + labels = request_body["impactMetrics"][0]["samples"][0]["labels"] + + assert labels == { + "appName": APP_NAME, + "environment": ENVIRONMENT, + "feature-with-variant": "treatment", + "enabled-feature": "enabled", + "disabled-feature": "disabled", + } + @responses.activate def test_impact_metrics_resent_after_failure(self, unleash_client): responses.add(responses.POST, URL + METRICS_URL, json={}, status=500) From 2ee3eb8865c810aafa75f91ab7da2967839f0f5d Mon Sep 17 00:00:00 2001 From: Mateusz Kwasniewski Date: Wed, 21 Jan 2026 10:41:10 +0100 Subject: [PATCH 07/14] feat: impact metrics --- README.md | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git 
a/README.md b/README.md index 45e588c4..1658878c 100644 --- a/README.md +++ b/README.md @@ -342,18 +342,15 @@ client.impact_metrics.increment_counter("request_count") #### Gauges -Use gauges for fluctuating values like memory usage or active connections: +Use gauges for point-in-time values that can go up or down: ```python -import psutil - client.impact_metrics.define_gauge( - "memory_usage", - "Current memory usage in bytes" + "total_users", + "Total number of registered users" ) -current_memory = psutil.Process().memory_info().rss -client.impact_metrics.update_gauge("memory_usage", current_memory) +client.impact_metrics.update_gauge("total_users", user_count) ``` #### Histograms From bca1d320f4e40a665f89a4fbf1fa68792470856e Mon Sep 17 00:00:00 2001 From: Mateusz Kwasniewski Date: Wed, 21 Jan 2026 13:45:19 +0100 Subject: [PATCH 08/14] feat: impact metrics --- UnleashClient/__init__.py | 9 ++++- UnleashClient/environment_resolver.py | 26 ++++++++++++++ tests/unit_tests/test_environment_resolver.py | 35 +++++++++++++++++++ 3 files changed, 69 insertions(+), 1 deletion(-) create mode 100644 UnleashClient/environment_resolver.py create mode 100644 tests/unit_tests/test_environment_resolver.py diff --git a/UnleashClient/__init__.py b/UnleashClient/__init__.py index 9c065e15..2bc53cc2 100644 --- a/UnleashClient/__init__.py +++ b/UnleashClient/__init__.py @@ -35,6 +35,7 @@ SDK_NAME, SDK_VERSION, ) +from UnleashClient.environment_resolver import extract_environment_from_headers from UnleashClient.events import ( BaseEvent, UnleashEvent, @@ -206,8 +207,14 @@ def __init__( self.fl_job: Job = None self.metric_job: Job = None self.engine = UnleashEngine() + + impact_metrics_environment = self.unleash_environment + extracted_env = extract_environment_from_headers(self.unleash_custom_headers) + if extracted_env: + impact_metrics_environment = extracted_env + self._impact_metrics = ImpactMetrics( - self.engine, self.unleash_app_name, self.unleash_environment + self.engine, self.unleash_app_name, impact_metrics_environment ) self.cache = cache or FileCache( diff --git a/UnleashClient/environment_resolver.py b/UnleashClient/environment_resolver.py new file mode 100644 index 00000000..68722f1e --- /dev/null +++ b/UnleashClient/environment_resolver.py @@ -0,0 +1,26 @@ +from typing import Dict, Optional + + +def extract_environment_from_headers( + headers: Optional[Dict[str, str]], +) -> Optional[str]: + if not headers: + return None + + auth_key = next( + (key for key in headers if key.lower() == "authorization"), + None, + ) + if not auth_key: + return None + + auth_value = headers.get(auth_key) + if not auth_value: + return None + + _, sep, after_colon = auth_value.partition(":") + if not sep: + return None + + environment, _, _ = after_colon.partition(".") + return environment or None diff --git a/tests/unit_tests/test_environment_resolver.py b/tests/unit_tests/test_environment_resolver.py new file mode 100644 index 00000000..1f31b11e --- /dev/null +++ b/tests/unit_tests/test_environment_resolver.py @@ -0,0 +1,35 @@ +from UnleashClient.environment_resolver import extract_environment_from_headers + + +def test_valid_headers(): + custom_headers = { + "Authorization": "project:environment.hash", + "Content-Type": "application/json", + } + + result = extract_environment_from_headers(custom_headers) + assert result == "environment" + + +def test_case_insensitive_header_keys(): + custom_headers = { + "AUTHORIZATION": "project:environment.hash", + "Content-Type": "application/json", + } + + result = 
extract_environment_from_headers(custom_headers) + assert result == "environment" + + +def test_authorization_header_not_present(): + result = extract_environment_from_headers({}) + assert result is None + + +def test_environment_part_is_empty(): + custom_headers = { + "Authorization": "project:.hash", + } + + result = extract_environment_from_headers(custom_headers) + assert result is None From bf554e920b7d35338248c30b682fffe45284d7ef Mon Sep 17 00:00:00 2001 From: Mateusz Kwasniewski Date: Wed, 21 Jan 2026 13:49:23 +0100 Subject: [PATCH 09/14] feat: impact metrics --- UnleashClient/__init__.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/UnleashClient/__init__.py b/UnleashClient/__init__.py index 2bc53cc2..c7b8b838 100644 --- a/UnleashClient/__init__.py +++ b/UnleashClient/__init__.py @@ -213,7 +213,7 @@ def __init__( if extracted_env: impact_metrics_environment = extracted_env - self._impact_metrics = ImpactMetrics( + self.impact_metrics = ImpactMetrics( self.engine, self.unleash_app_name, impact_metrics_environment ) @@ -284,9 +284,6 @@ def connection_id(self): def is_initialized(self): return self._run_state == _RunState.INITIALIZED - @property - def impact_metrics(self) -> ImpactMetrics: - return self._impact_metrics def initialize_client(self, fetch_toggles: bool = True) -> None: """ From d4b97940db44bdcff9cec0e0f0e77975ed40bf2c Mon Sep 17 00:00:00 2001 From: Mateusz Kwasniewski Date: Wed, 21 Jan 2026 13:54:54 +0100 Subject: [PATCH 10/14] feat: impact metrics --- UnleashClient/impact_metrics.py | 40 +++++++++++++++++---------------- 1 file changed, 21 insertions(+), 19 deletions(-) diff --git a/UnleashClient/impact_metrics.py b/UnleashClient/impact_metrics.py index 67213af6..b50ce018 100644 --- a/UnleashClient/impact_metrics.py +++ b/UnleashClient/impact_metrics.py @@ -20,8 +20,10 @@ class ImpactMetrics: def __init__(self, engine: UnleashEngine, app_name: str, environment: str): self._engine = engine - self._app_name = app_name - self._environment = environment + self._base_labels = { + "appName": app_name, + "environment": environment, + } def define_counter(self, name: str, help_text: str) -> None: self._engine.define_counter(name, help_text) @@ -61,24 +63,24 @@ def observe_histogram( labels = self._resolve_labels(flag_context) self._engine.observe_histogram(name, value, labels) + def _variant_label(self, flag_name: str, context: Dict[str, Any]) -> str: + variant = self._engine.get_variant(flag_name, context) + if variant and variant.enabled: + return variant.name + if variant and variant.feature_enabled: + return "enabled" + return "disabled" + def _resolve_labels( self, flag_context: Optional[MetricFlagContext] ) -> Dict[str, str]: - labels: Dict[str, str] = { - "appName": self._app_name, - "environment": self._environment, + if not flag_context: + return dict(self._base_labels) + + return { + **self._base_labels, + **{ + flag: self._variant_label(flag, flag_context.context) + for flag in flag_context.flag_names + }, } - - if flag_context is None: - return labels - - for flag_name in flag_context.flag_names: - variant = self._engine.get_variant(flag_name, flag_context.context) - if variant and variant.enabled: - labels[flag_name] = variant.name - elif variant and variant.feature_enabled: - labels[flag_name] = "enabled" - else: - labels[flag_name] = "disabled" - - return labels From d41836b6b4e07edb07e468503ac50a08f39e78d9 Mon Sep 17 00:00:00 2001 From: Mateusz Kwasniewski Date: Wed, 21 Jan 2026 14:07:18 +0100 Subject: [PATCH 11/14] feat: impact metrics 
--- UnleashClient/periodic_tasks/send_metrics.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/UnleashClient/periodic_tasks/send_metrics.py b/UnleashClient/periodic_tasks/send_metrics.py index 727a1367..dba31571 100644 --- a/UnleashClient/periodic_tasks/send_metrics.py +++ b/UnleashClient/periodic_tasks/send_metrics.py @@ -19,7 +19,12 @@ def aggregate_and_send_metrics( engine: UnleashEngine, ) -> None: metrics_bucket = engine.get_metrics() - impact_metrics = engine.collect_impact_metrics() + + try: + impact_metrics = engine.collect_impact_metrics() + except Exception as exc: + LOGGER.warning("Failed to collect impact metrics: %s", exc) + impact_metrics = None metrics_request = { "appName": app_name, From bb4445f4900cbc53838648661dfadd69cb72b08d Mon Sep 17 00:00:00 2001 From: Mateusz Kwasniewski Date: Thu, 22 Jan 2026 12:45:09 +0100 Subject: [PATCH 12/14] feat: impact metrics --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index b85f09d2..72414545 100644 --- a/requirements.txt +++ b/requirements.txt @@ -6,7 +6,7 @@ mmhash3 python-dateutil requests semver -yggdrasil-engine>=1.1.0 +yggdrasil-engine>=1.2.0 launchdarkly-eventsource # Development packages From a935bd3ed8e30bfcbd28651afeab3a9826c8468d Mon Sep 17 00:00:00 2001 From: Mateusz Kwasniewski Date: Thu, 22 Jan 2026 13:32:43 +0100 Subject: [PATCH 13/14] feat: impact metrics --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 72414545..a7c5456a 100644 --- a/requirements.txt +++ b/requirements.txt @@ -6,7 +6,7 @@ mmhash3 python-dateutil requests semver -yggdrasil-engine>=1.2.0 +yggdrasil-engine>=1.2.1 launchdarkly-eventsource # Development packages From eeb1e489cda2a31484e4649a7d88da3eb53d6dc1 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Thu, 22 Jan 2026 12:32:58 +0000 Subject: [PATCH 14/14] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- UnleashClient/__init__.py | 1 - 1 file changed, 1 deletion(-) diff --git a/UnleashClient/__init__.py b/UnleashClient/__init__.py index c7b8b838..3a6e04d3 100644 --- a/UnleashClient/__init__.py +++ b/UnleashClient/__init__.py @@ -284,7 +284,6 @@ def connection_id(self): def is_initialized(self): return self._run_state == _RunState.INITIALIZED - def initialize_client(self, fetch_toggles: bool = True) -> None: """ Initializes client and starts communication with central unleash server(s).