diff --git a/README.md b/README.md
index 763770a4..1658878c 100644
--- a/README.md
+++ b/README.md
@@ -319,6 +319,56 @@
 client.is_enabled("testFlag")
 ```
 
+### Impact metrics
+
+Impact metrics are lightweight, application-level time-series metrics stored and visualized directly inside Unleash. They allow you to connect specific application data, such as request counts, error rates, or latency, to your feature flags and release plans.
+
+These metrics help validate feature impact and automate release processes. For instance, you can monitor usage patterns or performance to determine if a feature meets its goals.
+
+The SDK automatically attaches context labels to metrics: `appName` and `environment`.
+
+#### Counters
+
+Use counters for cumulative values that only increase (total requests, errors):
+
+```python
+client.impact_metrics.define_counter(
+    "request_count",
+    "Total number of HTTP requests processed"
+)
+
+client.impact_metrics.increment_counter("request_count")
+```
+
+#### Gauges
+
+Use gauges for point-in-time values that can go up or down:
+
+```python
+client.impact_metrics.define_gauge(
+    "total_users",
+    "Total number of registered users"
+)
+
+client.impact_metrics.update_gauge("total_users", user_count)
+```
+
+#### Histograms
+
+Histograms measure value distribution (request duration, response size):
+
+```python
+client.impact_metrics.define_histogram(
+    "request_time_ms",
+    "Time taken to process a request in milliseconds",
+    [50, 100, 200, 500, 1000]
+)
+
+client.impact_metrics.observe_histogram("request_time_ms", 125)
+```
+
+Impact metrics are batched and sent using the same interval as standard SDK metrics.
+
 ### Custom cache
 
 By default, the Python SDK stores feature flags in an on-disk cache using fcache. If you need a different storage backend, for example, Redis, memory-only, or a custom database, you can provide your own cache implementation.
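The source and tests added below also introduce a `MetricFlagContext` helper (in `UnleashClient/impact_metrics.py`) that resolves a list of flags and attaches the results as extra labels on a recorded sample; the README section above does not cover it. A minimal usage sketch based on the tests in this change, where the flag name, metric name, and context values are purely illustrative and `client` is an initialized `UnleashClient` as in the README examples:

```python
from UnleashClient.impact_metrics import MetricFlagContext

# Each listed flag is resolved against the given context and added as a label:
# the variant name if a variant is enabled, otherwise "enabled" or "disabled".
flag_context = MetricFlagContext(
    flag_names=["checkout-redesign"],  # hypothetical flag name
    context={"userId": "123"},
)

client.impact_metrics.define_counter("purchases", "Number of purchases")
client.impact_metrics.increment_counter("purchases", 1, flag_context)
```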
diff --git a/UnleashClient/__init__.py b/UnleashClient/__init__.py
index 096b7d2f..3a6e04d3 100644
--- a/UnleashClient/__init__.py
+++ b/UnleashClient/__init__.py
@@ -35,12 +35,14 @@
     SDK_NAME,
     SDK_VERSION,
 )
+from UnleashClient.environment_resolver import extract_environment_from_headers
 from UnleashClient.events import (
     BaseEvent,
     UnleashEvent,
     UnleashEventType,
     UnleashReadyEvent,
 )
+from UnleashClient.impact_metrics import ImpactMetrics
 from UnleashClient.periodic_tasks import (
     aggregate_and_send_metrics,
 )
@@ -206,6 +208,15 @@ def __init__(
         self.metric_job: Job = None
         self.engine = UnleashEngine()
 
+        impact_metrics_environment = self.unleash_environment
+        extracted_env = extract_environment_from_headers(self.unleash_custom_headers)
+        if extracted_env:
+            impact_metrics_environment = extracted_env
+
+        self.impact_metrics = ImpactMetrics(
+            self.engine, self.unleash_app_name, impact_metrics_environment
+        )
+
         self.cache = cache or FileCache(
             self.unleash_app_name, directory=cache_directory
         )
diff --git a/UnleashClient/environment_resolver.py b/UnleashClient/environment_resolver.py
new file mode 100644
index 00000000..68722f1e
--- /dev/null
+++ b/UnleashClient/environment_resolver.py
@@ -0,0 +1,26 @@
+from typing import Dict, Optional
+
+
+def extract_environment_from_headers(
+    headers: Optional[Dict[str, str]],
+) -> Optional[str]:
+    if not headers:
+        return None
+
+    auth_key = next(
+        (key for key in headers if key.lower() == "authorization"),
+        None,
+    )
+    if not auth_key:
+        return None
+
+    auth_value = headers.get(auth_key)
+    if not auth_value:
+        return None
+
+    _, sep, after_colon = auth_value.partition(":")
+    if not sep:
+        return None
+
+    environment, _, _ = after_colon.partition(".")
+    return environment or None
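The resolver above assumes the Unleash API token format `<project>:<environment>.<hash>` carried in an `Authorization` header, which is what the new unit tests exercise; the header lookup is case-insensitive. A quick illustration with made-up token values:

```python
from UnleashClient.environment_resolver import extract_environment_from_headers

# Typical server-side API token shape: "<project>:<environment>.<hash>".
headers = {"Authorization": "default:production.0123456789abcdef"}
print(extract_environment_from_headers(headers))  # "production"

# No Authorization header, or a token without the ":" separator, yields None.
print(extract_environment_from_headers({"Content-Type": "application/json"}))  # None
print(extract_environment_from_headers({"Authorization": "not-a-token"}))  # None
```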
+ """ + + def __init__(self, engine: UnleashEngine, app_name: str, environment: str): + self._engine = engine + self._base_labels = { + "appName": app_name, + "environment": environment, + } + + def define_counter(self, name: str, help_text: str) -> None: + self._engine.define_counter(name, help_text) + + def increment_counter( + self, + name: str, + value: int = 1, + flag_context: Optional[MetricFlagContext] = None, + ) -> None: + labels = self._resolve_labels(flag_context) + self._engine.inc_counter(name, value, labels) + + def define_gauge(self, name: str, help_text: str) -> None: + self._engine.define_gauge(name, help_text) + + def update_gauge( + self, + name: str, + value: float, + flag_context: Optional[MetricFlagContext] = None, + ) -> None: + labels = self._resolve_labels(flag_context) + self._engine.set_gauge(name, value, labels) + + def define_histogram( + self, name: str, help_text: str, buckets: Optional[List[float]] = None + ) -> None: + self._engine.define_histogram(name, help_text, buckets) + + def observe_histogram( + self, + name: str, + value: float, + flag_context: Optional[MetricFlagContext] = None, + ) -> None: + labels = self._resolve_labels(flag_context) + self._engine.observe_histogram(name, value, labels) + + def _variant_label(self, flag_name: str, context: Dict[str, Any]) -> str: + variant = self._engine.get_variant(flag_name, context) + if variant and variant.enabled: + return variant.name + if variant and variant.feature_enabled: + return "enabled" + return "disabled" + + def _resolve_labels( + self, flag_context: Optional[MetricFlagContext] + ) -> Dict[str, str]: + if not flag_context: + return dict(self._base_labels) + + return { + **self._base_labels, + **{ + flag: self._variant_label(flag, flag_context.context) + for flag in flag_context.flag_names + }, + } diff --git a/UnleashClient/periodic_tasks/send_metrics.py b/UnleashClient/periodic_tasks/send_metrics.py index 55fed20f..dba31571 100644 --- a/UnleashClient/periodic_tasks/send_metrics.py +++ b/UnleashClient/periodic_tasks/send_metrics.py @@ -20,6 +20,12 @@ def aggregate_and_send_metrics( ) -> None: metrics_bucket = engine.get_metrics() + try: + impact_metrics = engine.collect_impact_metrics() + except Exception as exc: + LOGGER.warning("Failed to collect impact metrics: %s", exc) + impact_metrics = None + metrics_request = { "appName": app_name, "instanceId": instance_id, @@ -31,7 +37,14 @@ def aggregate_and_send_metrics( "specVersion": CLIENT_SPEC_VERSION, } - if metrics_bucket: - send_metrics(url, metrics_request, headers, custom_options, request_timeout) + if impact_metrics: + metrics_request["impactMetrics"] = impact_metrics + + if metrics_bucket or impact_metrics: + success = send_metrics( + url, metrics_request, headers, custom_options, request_timeout + ) + if not success and impact_metrics: + engine.restore_impact_metrics(impact_metrics) else: LOGGER.debug("No feature flags with metrics, skipping metrics submission.") diff --git a/requirements.txt b/requirements.txt index 55041ecb..a7c5456a 100644 --- a/requirements.txt +++ b/requirements.txt @@ -6,7 +6,7 @@ mmhash3 python-dateutil requests semver -yggdrasil-engine>=1.0.0 +yggdrasil-engine>=1.2.1 launchdarkly-eventsource # Development packages diff --git a/tests/unit_tests/test_environment_resolver.py b/tests/unit_tests/test_environment_resolver.py new file mode 100644 index 00000000..1f31b11e --- /dev/null +++ b/tests/unit_tests/test_environment_resolver.py @@ -0,0 +1,35 @@ +from UnleashClient.environment_resolver import 
extract_environment_from_headers + + +def test_valid_headers(): + custom_headers = { + "Authorization": "project:environment.hash", + "Content-Type": "application/json", + } + + result = extract_environment_from_headers(custom_headers) + assert result == "environment" + + +def test_case_insensitive_header_keys(): + custom_headers = { + "AUTHORIZATION": "project:environment.hash", + "Content-Type": "application/json", + } + + result = extract_environment_from_headers(custom_headers) + assert result == "environment" + + +def test_authorization_header_not_present(): + result = extract_environment_from_headers({}) + assert result is None + + +def test_environment_part_is_empty(): + custom_headers = { + "Authorization": "project:.hash", + } + + result = extract_environment_from_headers(custom_headers) + assert result is None diff --git a/tests/unit_tests/test_impact_metrics.py b/tests/unit_tests/test_impact_metrics.py new file mode 100644 index 00000000..8a8065dc --- /dev/null +++ b/tests/unit_tests/test_impact_metrics.py @@ -0,0 +1,193 @@ +import json + +import pytest +import responses + +from tests.utilities.testing_constants import ( + APP_NAME, + ENVIRONMENT, + INSTANCE_ID, + URL, +) +from UnleashClient import INSTANCES, UnleashClient +from UnleashClient.cache import FileCache +from UnleashClient.constants import METRICS_URL +from UnleashClient.impact_metrics import MetricFlagContext +from UnleashClient.periodic_tasks import aggregate_and_send_metrics + +MOCK_FEATURES_RESPONSE = { + "version": 1, + "features": [ + { + "name": "feature-with-variant", + "enabled": True, + "strategies": [{"name": "default", "parameters": {}}], + "variants": [{"name": "treatment", "weight": 1000}], + }, + { + "name": "enabled-feature", + "enabled": True, + "strategies": [{"name": "default", "parameters": {}}], + }, + { + "name": "disabled-feature", + "enabled": False, + "strategies": [{"name": "default", "parameters": {}}], + }, + ], +} + + +@pytest.fixture(autouse=True) +def before_each(): + INSTANCES._reset() + + +@pytest.fixture +def unleash_client(): + cache = FileCache("TEST_CACHE") + cache.bootstrap_from_dict(MOCK_FEATURES_RESPONSE) + client = UnleashClient( + url=URL, + app_name=APP_NAME, + environment=ENVIRONMENT, + instance_id=INSTANCE_ID, + disable_metrics=True, + disable_registration=True, + cache=cache, + ) + client.initialize_client(fetch_toggles=False) + yield client + client.destroy() + + +class TestSendMetricsViaClient: + @responses.activate + def test_impact_metrics_in_payload(self, unleash_client): + responses.add(responses.POST, URL + METRICS_URL, json={}, status=202) + + unleash_client.impact_metrics.define_counter("purchases", "Number of purchases") + unleash_client.impact_metrics.increment_counter("purchases", 1) + + unleash_client.impact_metrics.define_gauge("active_users", "Active users") + unleash_client.impact_metrics.update_gauge("active_users", 42) + + unleash_client.impact_metrics.define_histogram( + "latency", "Request latency", [0.1, 0.5, 1.0] + ) + unleash_client.impact_metrics.observe_histogram("latency", 0.3) + + aggregate_and_send_metrics( + url=URL, + app_name=APP_NAME, + instance_id=INSTANCE_ID, + connection_id="test-connection", + headers={}, + custom_options={}, + request_timeout=30, + engine=unleash_client.engine, + ) + + request_body = json.loads(responses.calls[0].request.body) + metrics = {m["name"]: m for m in request_body["impactMetrics"]} + + expected_labels = {"appName": APP_NAME, "environment": ENVIRONMENT} + + assert metrics == { + "purchases": { + "name": 
"purchases", + "help": "Number of purchases", + "type": "counter", + "samples": [{"labels": expected_labels, "value": 1}], + }, + "active_users": { + "name": "active_users", + "help": "Active users", + "type": "gauge", + "samples": [{"labels": expected_labels, "value": 42}], + }, + "latency": { + "name": "latency", + "help": "Request latency", + "type": "histogram", + "samples": [ + { + "labels": expected_labels, + "count": 1, + "sum": 0.3, + "buckets": [ + {"le": 0.1, "count": 0}, + {"le": 0.5, "count": 1}, + {"le": 1.0, "count": 1}, + {"le": "+Inf", "count": 1}, + ], + } + ], + }, + } + + @responses.activate + def test_impact_metrics_with_flag_context(self, unleash_client): + responses.add(responses.POST, URL + METRICS_URL, json={}, status=202) + + flag_context = MetricFlagContext( + flag_names=["feature-with-variant", "enabled-feature", "disabled-feature"], + context={"userId": "123"}, + ) + + unleash_client.impact_metrics.define_counter("purchases", "Number of purchases") + unleash_client.impact_metrics.increment_counter("purchases", 1, flag_context) + + aggregate_and_send_metrics( + url=URL, + app_name=APP_NAME, + instance_id=INSTANCE_ID, + connection_id="test-connection", + headers={}, + custom_options={}, + request_timeout=30, + engine=unleash_client.engine, + ) + + request_body = json.loads(responses.calls[0].request.body) + labels = request_body["impactMetrics"][0]["samples"][0]["labels"] + + assert labels == { + "appName": APP_NAME, + "environment": ENVIRONMENT, + "feature-with-variant": "treatment", + "enabled-feature": "enabled", + "disabled-feature": "disabled", + } + + @responses.activate + def test_impact_metrics_resent_after_failure(self, unleash_client): + responses.add(responses.POST, URL + METRICS_URL, json={}, status=500) + responses.add(responses.POST, URL + METRICS_URL, json={}, status=202) + + unleash_client.impact_metrics.define_counter("my_counter", "Test counter") + unleash_client.impact_metrics.increment_counter("my_counter", 5) + + aggregate_and_send_metrics( + url=URL, + app_name=APP_NAME, + instance_id=INSTANCE_ID, + connection_id="test-connection", + headers={}, + custom_options={}, + request_timeout=30, + engine=unleash_client.engine, + ) + aggregate_and_send_metrics( + url=URL, + app_name=APP_NAME, + instance_id=INSTANCE_ID, + connection_id="test-connection", + headers={}, + custom_options={}, + request_timeout=30, + engine=unleash_client.engine, + ) + + request_body = json.loads(responses.calls[1].request.body) + assert request_body["impactMetrics"][0]["name"] == "my_counter"