diff --git a/dd-trace-api/src/main/java/datadog/trace/api/config/GeneralConfig.java b/dd-trace-api/src/main/java/datadog/trace/api/config/GeneralConfig.java
index 60af53815fc..9f1a458ad3d 100644
--- a/dd-trace-api/src/main/java/datadog/trace/api/config/GeneralConfig.java
+++ b/dd-trace-api/src/main/java/datadog/trace/api/config/GeneralConfig.java
@@ -76,6 +76,9 @@ public final class GeneralConfig {
public static final String TRACER_METRICS_MAX_PENDING = "trace.tracer.metrics.max.pending";
public static final String TRACER_METRICS_IGNORED_RESOURCES =
"trace.tracer.metrics.ignored.resources";
+ public static final String TRACE_STATS_ADDITIONAL_TAGS = "trace.stats.additional.tags";
+ public static final String TRACE_STATS_ADDITIONAL_TAGS_CARDINALITY_LIMIT =
+ "trace.stats.additional.tags.cardinality.limit";
public static final String AZURE_APP_SERVICES = "azure.app.services";
public static final String INTERNAL_EXIT_ON_FAILURE = "trace.internal.exit.on.failure";
diff --git a/dd-trace-core/src/jmh/java/datadog/trace/common/metrics/ConflatingMetricsAggregatorBenchmark.java b/dd-trace-core/src/jmh/java/datadog/trace/common/metrics/ConflatingMetricsAggregatorBenchmark.java
index 971ee5cf6e4..b66e2cfc266 100644
--- a/dd-trace-core/src/jmh/java/datadog/trace/common/metrics/ConflatingMetricsAggregatorBenchmark.java
+++ b/dd-trace-core/src/jmh/java/datadog/trace/common/metrics/ConflatingMetricsAggregatorBenchmark.java
@@ -40,6 +40,8 @@ public class ConflatingMetricsAggregatorBenchmark {
new ConflatingMetricsAggregator(
new WellKnownTags("", "", "", "", "", ""),
Collections.emptySet(),
+ Collections.emptySet(),
+ 100,
featuresDiscovery,
HealthMetrics.NO_OP,
new NullSink(),
diff --git a/dd-trace-core/src/main/java/datadog/trace/common/metrics/AdditionalTagsCardinalityLimiter.java b/dd-trace-core/src/main/java/datadog/trace/common/metrics/AdditionalTagsCardinalityLimiter.java
new file mode 100644
index 00000000000..6c35134aad9
--- /dev/null
+++ b/dd-trace-core/src/main/java/datadog/trace/common/metrics/AdditionalTagsCardinalityLimiter.java
@@ -0,0 +1,92 @@
+package datadog.trace.common.metrics;
+
+import datadog.trace.core.monitor.HealthMetrics;
+import java.util.Collections;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Bounded per-tag cardinality protection for `additional_metric_tags`.
+ *
+ * <p>For each configured tag key, admits at most {@code limitPerTag} distinct values within a
+ * rolling window. Excess values are replaced with {@link #BLOCKED_VALUE} so the span's base stats
+ * still flow through but the extra dimension is suppressed.
+ *
+ * <p>The rolling window is implemented as a hard reset: callers schedule {@link #reset()} on a
+ * fixed interval (10 minutes by default). After a reset, previously blocked values get a fresh
+ * chance to be admitted.
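+ *
+ * <p>Illustrative use (mirrors the behaviour exercised in the unit tests added in this change):
+ *
+ * <pre>{@code
+ * AdditionalTagsCardinalityLimiter limiter =
+ *     new AdditionalTagsCardinalityLimiter(2, HealthMetrics.NO_OP);
+ * limiter.admitOrBlock("region", "us-east-1");  // admitted
+ * limiter.admitOrBlock("region", "eu-west-1");  // admitted
+ * limiter.admitOrBlock("region", "ap-south-1"); // returns "blocked_by_tracer"
+ * limiter.reset();                              // new window
+ * limiter.admitOrBlock("region", "ap-south-1"); // admitted again
+ * }</pre>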
+ */
+final class AdditionalTagsCardinalityLimiter {
+
+ static final String BLOCKED_VALUE = "blocked_by_tracer";
+
+ private static final Logger log = LoggerFactory.getLogger(AdditionalTagsCardinalityLimiter.class);
+
+ private final int limitPerTag;
+ private final HealthMetrics healthMetrics;
+ private final ConcurrentHashMap<String, Set<String>> seenValuesPerTag = new ConcurrentHashMap<>();
+ private final Set<String> warnedAboutCardinality =
+ Collections.newSetFromMap(new ConcurrentHashMap<>());
+ private final Set<String> warnedAboutLength =
+ Collections.newSetFromMap(new ConcurrentHashMap<>());
+
+ AdditionalTagsCardinalityLimiter(int limitPerTag, HealthMetrics healthMetrics) {
+ this.limitPerTag = limitPerTag;
+ this.healthMetrics = healthMetrics;
+ }
+
+ /**
+ * @return {@code value} if admitted under the cap, otherwise {@link #BLOCKED_VALUE}.
+ */
+ String admitOrBlock(String tagKey, String value) {
+ Set<String> seen =
+ seenValuesPerTag.computeIfAbsent(
+ tagKey, k -> Collections.newSetFromMap(new ConcurrentHashMap<>()));
+ if (seen.contains(value)) {
+ return value;
+ }
+ if (seen.size() >= limitPerTag) {
+ healthMetrics.onAdditionalTagValueCardinalityBlocked(tagKey);
+ if (warnedAboutCardinality.add(tagKey)) {
+ log.warn(
+ "Additional metric tag '{}' exceeded the per-tag cardinality limit of {}; "
+ + "replacing values with '{}' for the rest of the current window",
+ tagKey,
+ limitPerTag,
+ BLOCKED_VALUE);
+ }
+ return BLOCKED_VALUE;
+ }
+ seen.add(value);
+ return value;
+ }
+
+ /**
+ * Records that a value for {@code tagKey} was blocked due to exceeding the per-value length cap.
+ * Fires the same health metric as a cardinality block and emits a distinct warn log line once per
+ * tag key per window.
+ */
+ void noteBlockedDueToLength(String tagKey, int valueLength, int maxLength) {
+ healthMetrics.onAdditionalTagValueCardinalityBlocked(tagKey);
+ if (warnedAboutLength.add(tagKey)) {
+ log.warn(
+ "Additional metric tag '{}' had a value of length {} exceeding the max length of {}; "
+ + "replacing with '{}' for the rest of the current window",
+ tagKey,
+ valueLength,
+ maxLength,
+ BLOCKED_VALUE);
+ }
+ }
+
+ /** Clears per-tag value sets and rearms the per-key log lines. Invoked by the periodic task. */
+ void reset() {
+ for (Set<String> seen : seenValuesPerTag.values()) {
+ seen.clear();
+ }
+ warnedAboutCardinality.clear();
+ warnedAboutLength.clear();
+ }
+}
diff --git a/dd-trace-core/src/main/java/datadog/trace/common/metrics/ConflatingMetricsAggregator.java b/dd-trace-core/src/main/java/datadog/trace/common/metrics/ConflatingMetricsAggregator.java
index f60edf1d700..1a9b5117f17 100644
--- a/dd-trace-core/src/main/java/datadog/trace/common/metrics/ConflatingMetricsAggregator.java
+++ b/dd-trace-core/src/main/java/datadog/trace/common/metrics/ConflatingMetricsAggregator.java
@@ -80,6 +80,16 @@ public final class ConflatingMetricsAggregator implements MetricsAggregator, Eve
Pair.of(
DDCaches.newFixedSizeCache(512),
value -> UTF8BytesString.create(key + ":" + value));
+ private static final DDCache<
+ String, Pair<DDCache<String, UTF8BytesString>, Function<String, UTF8BytesString>>>
+ ADDITIONAL_TAG_VALUES_CACHE = DDCaches.newFixedSizeCache(64);
+ private static final Function<
+ String, Pair<DDCache<String, UTF8BytesString>, Function<String, UTF8BytesString>>>
+ ADDITIONAL_TAG_VALUES_CACHE_ADDER =
+ key ->
+ Pair.of(
+ DDCaches.newFixedSizeCache(512),
+ value -> UTF8BytesString.create(key + ":" + value));
private static final CharSequence SYNTHETICS_ORIGIN = "synthetics";
private static final Set<String> ELIGIBLE_SPAN_KINDS_FOR_METRICS =
@@ -92,7 +102,21 @@ public final class ConflatingMetricsAggregator implements MetricsAggregator, Eve
unmodifiableSet(
new HashSet<>(Arrays.asList(SPAN_KIND_CLIENT, SPAN_KIND_PRODUCER, SPAN_KIND_CONSUMER)));
+ // Cap on the number of configured additional metric tag keys. By default only 4 primary tag
+ // dimensions are supported; we sometimes raise that limit for individual users, so a cap of 10
+ // protects against extreme misconfiguration while still leaving room for extra tags.
+ static final int MAX_ADDITIONAL_TAG_KEYS = 10;
+
+ // Maximum length of an additional metric tag *value*. Caps cache footprint and wire payload
+ // size when stack traces, JSON, or SQL get stuffed into a tag by misconfigured app code.
+ // Values longer than this are emitted as `<key>:blocked_by_tracer`.
+ static final int MAX_ADDITIONAL_TAG_VALUE_LENGTH = 250;
+
private final Set<String> ignoredResources;
+ private final List<String> additionalTagKeys;
+ private final AdditionalTagsCardinalityLimiter cardinalityLimiter;
private final MessagePassingQueue<Batch> batchPool;
private final ConcurrentHashMap<MetricKey, Batch> pending;
private final ConcurrentHashMap<MetricKey, MetricKey> keys;
@@ -107,6 +131,10 @@ public final class ConflatingMetricsAggregator implements MetricsAggregator, Eve
private final boolean includeEndpointInMetrics;
private volatile AgentTaskScheduler.Scheduled<?> cancellation;
+ private volatile AgentTaskScheduler.Scheduled<?> cardinalityResetCancellation;
+
+ // Hard-reset window for per-tag value cardinality tracking.
+ static final long CARDINALITY_RESET_INTERVAL_MINUTES = 10;
public ConflatingMetricsAggregator(
Config config,
@@ -115,6 +143,8 @@ public ConflatingMetricsAggregator(
this(
config.getWellKnownTags(),
config.getMetricsIgnoredResources(),
+ config.getTraceStatsAdditionalTags(),
+ config.getTraceStatsAdditionalTagsCardinalityLimit(),
sharedCommunicationObjects.featuresDiscovery(config),
healthMetrics,
new OkHttpSink(
@@ -132,6 +162,8 @@ public ConflatingMetricsAggregator(
ConflatingMetricsAggregator(
WellKnownTags wellKnownTags,
Set<String> ignoredResources,
+ Set<String> additionalTagKeys,
+ int additionalTagsCardinalityLimit,
DDAgentFeaturesDiscovery features,
HealthMetrics healthMetric,
Sink sink,
@@ -141,6 +173,8 @@ public ConflatingMetricsAggregator(
this(
wellKnownTags,
ignoredResources,
+ additionalTagKeys,
+ additionalTagsCardinalityLimit,
features,
healthMetric,
sink,
@@ -154,6 +188,8 @@ public ConflatingMetricsAggregator(
ConflatingMetricsAggregator(
WellKnownTags wellKnownTags,
Set<String> ignoredResources,
+ Set<String> additionalTagKeys,
+ int additionalTagsCardinalityLimit,
DDAgentFeaturesDiscovery features,
HealthMetrics healthMetric,
Sink sink,
@@ -164,6 +200,8 @@ public ConflatingMetricsAggregator(
boolean includeEndpointInMetrics) {
this(
ignoredResources,
+ additionalTagKeys,
+ additionalTagsCardinalityLimit,
features,
healthMetric,
sink,
@@ -177,6 +215,8 @@ public ConflatingMetricsAggregator(
ConflatingMetricsAggregator(
Set<String> ignoredResources,
+ Set<String> additionalTagKeys,
+ int additionalTagsCardinalityLimit,
DDAgentFeaturesDiscovery features,
HealthMetrics healthMetric,
Sink sink,
@@ -187,6 +227,9 @@ public ConflatingMetricsAggregator(
TimeUnit timeUnit,
boolean includeEndpointInMetrics) {
this.ignoredResources = ignoredResources;
+ this.additionalTagKeys = normalizeAdditionalTagKeys(additionalTagKeys);
+ this.cardinalityLimiter =
+ new AdditionalTagsCardinalityLimiter(additionalTagsCardinalityLimit, healthMetric);
this.includeEndpointInMetrics = includeEndpointInMetrics;
this.inbox = Queues.mpscArrayQueue(queueSize);
this.batchPool = Queues.spmcArrayQueue(maxAggregates);
@@ -223,6 +266,14 @@ public void start() {
reportingInterval,
reportingInterval,
reportingIntervalTimeUnit);
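+ // Periodically clear per-tag value tracking so previously blocked values can be re-admitted.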
+ cardinalityResetCancellation =
+ AgentTaskScheduler.get()
+ .scheduleAtFixedRate(
+ new CardinalityResetTask(),
+ this,
+ CARDINALITY_RESET_INTERVAL_MINUTES,
+ CARDINALITY_RESET_INTERVAL_MINUTES,
+ TimeUnit.MINUTES);
log.debug("started metrics aggregator");
}
@@ -350,7 +401,8 @@ private boolean publish(CoreSpan> span, boolean isTopLevel, CharSequence spanK
getPeerTags(span, spanKind.toString()),
httpMethod,
httpEndpoint,
- grpcStatusCode);
+ grpcStatusCode,
+ getAdditionalTags(span));
MetricKey key = keys.putIfAbsent(newKey, newKey);
if (null == key) {
key = newKey;
@@ -413,6 +465,55 @@ private List getPeerTags(CoreSpan> span, String spanKind) {
return Collections.emptyList();
}
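+ /** Sorts the configured keys, caps them at {@link #MAX_ADDITIONAL_TAG_KEYS}, and returns an immutable list. */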
+ static List<String> normalizeAdditionalTagKeys(Set<String> configured) {
+ if (configured == null || configured.isEmpty()) {
+ return Collections.emptyList();
+ }
+ List<String> sorted = new ArrayList<>(configured);
+ Collections.sort(sorted);
+ if (sorted.size() > MAX_ADDITIONAL_TAG_KEYS) {
+ log.warn(
+ "Configured additional metric tag keys ({}) exceeds the supported limit of {}; "
+ + "dropping extra keys: {}",
+ sorted.size(),
+ MAX_ADDITIONAL_TAG_KEYS,
+ sorted.subList(MAX_ADDITIONAL_TAG_KEYS, sorted.size()));
+ sorted = sorted.subList(0, MAX_ADDITIONAL_TAG_KEYS);
+ }
+ return Collections.unmodifiableList(new ArrayList<>(sorted));
+ }
+
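+ /**
+ * Resolves the configured additional tag keys against the span's tags, applying the per-value
+ * length cap and the per-tag cardinality limiter, and returns cached {@code key:value} UTF-8
+ * strings used as extra stats dimensions.
+ */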
+ private List<UTF8BytesString> getAdditionalTags(CoreSpan<?> span) {
+ if (additionalTagKeys.isEmpty()) {
+ return Collections.emptyList();
+ }
+ List<UTF8BytesString> result = null;
+ for (String tagKey : additionalTagKeys) {
+ Object value = span.unsafeGetTag(tagKey);
+ if (value == null) {
+ continue;
+ }
+ String rawValue = value.toString();
+ String admittedValue;
+ if (rawValue.length() > MAX_ADDITIONAL_TAG_VALUE_LENGTH) {
+ cardinalityLimiter.noteBlockedDueToLength(
+ tagKey, rawValue.length(), MAX_ADDITIONAL_TAG_VALUE_LENGTH);
+ admittedValue = AdditionalTagsCardinalityLimiter.BLOCKED_VALUE;
+ } else {
+ admittedValue = cardinalityLimiter.admitOrBlock(tagKey, rawValue);
+ }
+ Pair<DDCache<String, UTF8BytesString>, Function<String, UTF8BytesString>> cacheAndCreator =
+ ADDITIONAL_TAG_VALUES_CACHE.computeIfAbsent(tagKey, ADDITIONAL_TAG_VALUES_CACHE_ADDER);
+ UTF8BytesString formatted =
+ cacheAndCreator.getLeft().computeIfAbsent(admittedValue, cacheAndCreator.getRight());
+ if (result == null) {
+ result = new ArrayList<>(additionalTagKeys.size());
+ }
+ result.add(formatted);
+ }
+ return result == null ? Collections.emptyList() : result;
+ }
+
private static boolean isSynthetic(CoreSpan> span) {
return span.getOrigin() != null && SYNTHETICS_ORIGIN.equals(span.getOrigin().toString());
}
@@ -429,6 +530,9 @@ public void stop() {
if (null != cancellation) {
cancellation.cancel();
}
+ if (null != cardinalityResetCancellation) {
+ cardinalityResetCancellation.cancel();
+ }
inbox.offer(STOP);
}
@@ -482,4 +586,13 @@ public void run(ConflatingMetricsAggregator target) {
target.report();
}
}
+
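+ /** Clears the additional-tag cardinality limiter on the fixed 10-minute window. */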
+ private static final class CardinalityResetTask
+ implements AgentTaskScheduler.Task<ConflatingMetricsAggregator> {
+
+ @Override
+ public void run(ConflatingMetricsAggregator target) {
+ target.cardinalityLimiter.reset();
+ }
+ }
}
diff --git a/dd-trace-core/src/main/java/datadog/trace/common/metrics/MetricKey.java b/dd-trace-core/src/main/java/datadog/trace/common/metrics/MetricKey.java
index 9e2e2098d1f..8ae11341d51 100644
--- a/dd-trace-core/src/main/java/datadog/trace/common/metrics/MetricKey.java
+++ b/dd-trace-core/src/main/java/datadog/trace/common/metrics/MetricKey.java
@@ -39,6 +39,7 @@ public final class MetricKey {
private final UTF8BytesString httpMethod;
private final UTF8BytesString httpEndpoint;
private final UTF8BytesString grpcStatusCode;
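+ // Pre-formatted "key:value" additional stat dimensions; empty when the feature is not configured.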
+ private final List<UTF8BytesString> additionalTags;
public MetricKey(
CharSequence resource,
@@ -53,7 +54,8 @@ public MetricKey(
List<UTF8BytesString> peerTags,
CharSequence httpMethod,
CharSequence httpEndpoint,
- CharSequence grpcStatusCode) {
+ CharSequence grpcStatusCode,
+ List<UTF8BytesString> additionalTags) {
this.resource = null == resource ? EMPTY : utf8(RESOURCE_CACHE, resource);
this.service = null == service ? EMPTY : utf8(SERVICE_CACHE, service);
this.serviceSource = null == serviceSource ? null : utf8(SERVICE_SOURCE_CACHE, serviceSource);
@@ -68,6 +70,7 @@ public MetricKey(
this.httpEndpoint = httpEndpoint == null ? null : utf8(HTTP_ENDPOINT_CACHE, httpEndpoint);
this.grpcStatusCode =
grpcStatusCode == null ? null : utf8(GRPC_STATUS_CODE_CACHE, grpcStatusCode);
+ this.additionalTags = additionalTags == null ? Collections.emptyList() : additionalTags;
int tmpHash = 0;
tmpHash = HashingUtils.addToHash(tmpHash, this.isTraceRoot);
@@ -83,6 +86,7 @@ public MetricKey(
tmpHash = HashingUtils.addToHash(tmpHash, this.httpEndpoint);
tmpHash = HashingUtils.addToHash(tmpHash, this.httpMethod);
tmpHash = HashingUtils.addToHash(tmpHash, this.grpcStatusCode);
+ tmpHash = HashingUtils.addToHash(tmpHash, this.additionalTags);
this.hash = tmpHash;
}
@@ -146,6 +150,10 @@ public UTF8BytesString getGrpcStatusCode() {
return grpcStatusCode;
}
+ public List<UTF8BytesString> getAdditionalTags() {
+ return additionalTags;
+ }
+
@Override
public boolean equals(Object o) {
if (this == o) {
@@ -166,7 +174,8 @@ public boolean equals(Object o) {
&& Objects.equals(serviceSource, metricKey.serviceSource)
&& Objects.equals(httpMethod, metricKey.httpMethod)
&& Objects.equals(httpEndpoint, metricKey.httpEndpoint)
- && Objects.equals(grpcStatusCode, metricKey.grpcStatusCode);
+ && Objects.equals(grpcStatusCode, metricKey.grpcStatusCode)
+ && additionalTags.equals(metricKey.additionalTags);
}
return false;
}
diff --git a/dd-trace-core/src/main/java/datadog/trace/common/metrics/SerializingMetricWriter.java b/dd-trace-core/src/main/java/datadog/trace/common/metrics/SerializingMetricWriter.java
index 0f84964e9db..b59f197a5e7 100644
--- a/dd-trace-core/src/main/java/datadog/trace/common/metrics/SerializingMetricWriter.java
+++ b/dd-trace-core/src/main/java/datadog/trace/common/metrics/SerializingMetricWriter.java
@@ -41,6 +41,7 @@ public final class SerializingMetricWriter implements MetricWriter {
private static final byte[] IS_TRACE_ROOT = "IsTraceRoot".getBytes(ISO_8859_1);
private static final byte[] SPAN_KIND = "SpanKind".getBytes(ISO_8859_1);
private static final byte[] PEER_TAGS = "PeerTags".getBytes(ISO_8859_1);
+ private static final byte[] ADDITIONAL_METRIC_TAGS = "AdditionalMetricTags".getBytes(ISO_8859_1);
private static final byte[] HTTP_METHOD = "HTTPMethod".getBytes(ISO_8859_1);
private static final byte[] HTTP_ENDPOINT = "HTTPEndpoint".getBytes(ISO_8859_1);
private static final byte[] GRPC_STATUS_CODE = "GRPCStatusCode".getBytes(ISO_8859_1);
@@ -148,12 +149,14 @@ public void add(MetricKey key, AggregateMetric aggregate) {
final boolean hasHttpEndpoint = key.getHttpEndpoint() != null;
final boolean hasServiceSource = key.getServiceSource() != null;
final boolean hasGrpcStatusCode = key.getGrpcStatusCode() != null;
+ final boolean hasAdditionalTags = !key.getAdditionalTags().isEmpty();
final int mapSize =
15
+ (hasServiceSource ? 1 : 0)
+ (hasHttpMethod ? 1 : 0)
+ (hasHttpEndpoint ? 1 : 0)
- + (hasGrpcStatusCode ? 1 : 0);
+ + (hasGrpcStatusCode ? 1 : 0)
+ + (hasAdditionalTags ? 1 : 0);
writer.startMap(mapSize);
@@ -189,6 +192,15 @@ public void add(MetricKey key, AggregateMetric aggregate) {
writer.writeUTF8(peerTag);
}
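+ // Additional tags mirror PeerTags on the wire: each entry is a pre-formatted "key:value" string.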
+ if (hasAdditionalTags) {
+ writer.writeUTF8(ADDITIONAL_METRIC_TAGS);
+ final List<UTF8BytesString> additionalTags = key.getAdditionalTags();
+ writer.startArray(additionalTags.size());
+ for (UTF8BytesString tag : additionalTags) {
+ writer.writeUTF8(tag);
+ }
+ }
+
if (hasServiceSource) {
writer.writeUTF8(SERVICE_SOURCE);
writer.writeUTF8(key.getServiceSource());
diff --git a/dd-trace-core/src/main/java/datadog/trace/core/monitor/HealthMetrics.java b/dd-trace-core/src/main/java/datadog/trace/core/monitor/HealthMetrics.java
index 257d887029b..c7dadc17eff 100644
--- a/dd-trace-core/src/main/java/datadog/trace/core/monitor/HealthMetrics.java
+++ b/dd-trace-core/src/main/java/datadog/trace/core/monitor/HealthMetrics.java
@@ -93,6 +93,8 @@ public void onClientStatDowngraded() {}
public void onStatsAggregateDropped() {}
+ public void onAdditionalTagValueCardinalityBlocked(String tagKey) {}
+
/**
* @return Human-readable summary of the current health metrics.
*/
diff --git a/dd-trace-core/src/main/java/datadog/trace/core/monitor/TracerHealthMetrics.java b/dd-trace-core/src/main/java/datadog/trace/core/monitor/TracerHealthMetrics.java
index 2df54241e56..7ecc9c316aa 100644
--- a/dd-trace-core/src/main/java/datadog/trace/core/monitor/TracerHealthMetrics.java
+++ b/dd-trace-core/src/main/java/datadog/trace/core/monitor/TracerHealthMetrics.java
@@ -98,6 +98,7 @@ public class TracerHealthMetrics extends HealthMetrics implements AutoCloseable
private final LongAdder clientStatsDowngrades = new LongAdder();
private final LongAdder statsAggregateDropped = new LongAdder();
+ private final LongAdder additionalTagValueCardinalityBlocked = new LongAdder();
private final StatsDClient statsd;
private final long interval;
@@ -352,6 +353,11 @@ public void onClientStatErrorReceived() {
clientStatsErrors.increment();
}
+ @Override
+ public void onAdditionalTagValueCardinalityBlocked(String tagKey) {
+ additionalTagValueCardinalityBlocked.increment();
+ }
+
@Override
public void onStatsAggregateDropped() {
statsAggregateDropped.increment();
@@ -504,6 +510,11 @@ public void run(TracerHealthMetrics target) {
"stats.dropped_aggregates",
target.statsAggregateDropped,
REASON_LRU_EVICTION_TAG);
+ reportIfChanged(
+ target.statsd,
+ "stats.additional_tag.cardinality_blocked",
+ target.additionalTagValueCardinalityBlocked,
+ NO_TAGS);
} catch (ArrayIndexOutOfBoundsException e) {
log.warn(
@@ -637,6 +648,8 @@ public String summary() {
+ "\nclientStatsProcessedTraces="
+ clientStatsProcessedTraces.sum()
+ "\nstatsAggregateDropped="
- + statsAggregateDropped.sum();
+ + statsAggregateDropped.sum()
+ + "\nadditionalTagValueCardinalityBlocked="
+ + additionalTagValueCardinalityBlocked.sum();
}
}
diff --git a/dd-trace-core/src/test/groovy/datadog/trace/common/metrics/AggregateMetricTest.groovy b/dd-trace-core/src/test/groovy/datadog/trace/common/metrics/AggregateMetricTest.groovy
index 0b245552db3..b5c4d2717e5 100644
--- a/dd-trace-core/src/test/groovy/datadog/trace/common/metrics/AggregateMetricTest.groovy
+++ b/dd-trace-core/src/test/groovy/datadog/trace/common/metrics/AggregateMetricTest.groovy
@@ -65,7 +65,7 @@ class AggregateMetricTest extends DDSpecification {
given:
AggregateMetric aggregate = new AggregateMetric().recordDurations(3, new AtomicLongArray(0L, 0L, 0L | ERROR_TAG | TOP_LEVEL_TAG))
- Batch batch = new Batch().reset(new MetricKey("foo", "bar", "qux", null, "type", 0, false, true, "corge", [UTF8BytesString.create("grault:quux")], null, null, null))
+ Batch batch = new Batch().reset(new MetricKey("foo", "bar", "qux", null, "type", 0, false, true, "corge", [UTF8BytesString.create("grault:quux")], null, null, null, null))
batch.add(0L, 10)
batch.add(0L, 10)
batch.add(0L, 10)
@@ -140,7 +140,7 @@ class AggregateMetricTest extends DDSpecification {
def "consistent under concurrent attempts to read and write"() {
given:
AggregateMetric aggregate = new AggregateMetric()
- MetricKey key = new MetricKey("foo", "bar", "qux", null, "type", 0, false, true, "corge", [UTF8BytesString.create("grault:quux")], null, null, null)
+ MetricKey key = new MetricKey("foo", "bar", "qux", null, "type", 0, false, true, "corge", [UTF8BytesString.create("grault:quux")], null, null, null, null)
BlockingDeque queue = new LinkedBlockingDeque<>(1000)
ExecutorService reader = Executors.newSingleThreadExecutor()
int writerCount = 10
diff --git a/dd-trace-core/src/test/groovy/datadog/trace/common/metrics/ConflatingMetricAggregatorTest.groovy b/dd-trace-core/src/test/groovy/datadog/trace/common/metrics/ConflatingMetricAggregatorTest.groovy
index 962ad2ce892..4fb752d2faa 100644
--- a/dd-trace-core/src/test/groovy/datadog/trace/common/metrics/ConflatingMetricAggregatorTest.groovy
+++ b/dd-trace-core/src/test/groovy/datadog/trace/common/metrics/ConflatingMetricAggregatorTest.groovy
@@ -38,6 +38,8 @@ class ConflatingMetricAggregatorTest extends DDSpecification {
ConflatingMetricsAggregator aggregator = new ConflatingMetricsAggregator(
wellKnownTags,
empty,
+ [] as Set,
+ 100,
features,
HealthMetrics.NO_OP,
sink,
@@ -68,6 +70,8 @@ class ConflatingMetricAggregatorTest extends DDSpecification {
ConflatingMetricsAggregator aggregator = new ConflatingMetricsAggregator(
wellKnownTags,
[ignoredResourceName].toSet(),
+ [] as Set,
+ 100,
features,
HealthMetrics.NO_OP,
sink,
@@ -104,6 +108,8 @@ class ConflatingMetricAggregatorTest extends DDSpecification {
features.supportsMetrics() >> true
features.peerTags() >> []
ConflatingMetricsAggregator aggregator = new ConflatingMetricsAggregator(empty,
+ [] as Set,
+ 100,
features, HealthMetrics.NO_OP, sink, writer, 10, queueSize, reportingInterval, SECONDS, false)
aggregator.start()
@@ -133,7 +139,7 @@ class ConflatingMetricAggregatorTest extends DDSpecification {
null,
null,
null
- ), _) >> { MetricKey key, AggregateMetric value ->
+ , null), _) >> { MetricKey key, AggregateMetric value ->
value.getHitCount() == 1 && value.getTopLevelCount() == 1 && value.getDuration() == 100
}
1 * writer.finishBucket() >> { latch.countDown() }
@@ -150,6 +156,8 @@ class ConflatingMetricAggregatorTest extends DDSpecification {
features.supportsMetrics() >> true
features.peerTags() >> []
ConflatingMetricsAggregator aggregator = new ConflatingMetricsAggregator(empty,
+ [] as Set,
+ 100,
features, HealthMetrics.NO_OP, sink, writer, 10, queueSize, reportingInterval, SECONDS, false)
aggregator.start()
@@ -179,7 +187,7 @@ class ConflatingMetricAggregatorTest extends DDSpecification {
null,
null,
null
- ), _) >> { MetricKey key, AggregateMetric value ->
+ , null), _) >> { MetricKey key, AggregateMetric value ->
value.getHitCount() == 1 && value.getTopLevelCount() == 1 && value.getDuration() == 100
}
1 * writer.finishBucket() >> { latch.countDown() }
@@ -196,6 +204,8 @@ class ConflatingMetricAggregatorTest extends DDSpecification {
features.supportsMetrics() >> true
features.peerTags() >> []
ConflatingMetricsAggregator aggregator = new ConflatingMetricsAggregator(empty,
+ [] as Set,
+ 100,
features, HealthMetrics.NO_OP, sink, writer, 10, queueSize, reportingInterval, SECONDS, true)
aggregator.start()
@@ -231,7 +241,7 @@ class ConflatingMetricAggregatorTest extends DDSpecification {
httpMethod,
httpEndpoint,
null
- ), { AggregateMetric aggregateMetric ->
+ , null), { AggregateMetric aggregateMetric ->
aggregateMetric.getHitCount() == 1 && aggregateMetric.getTopLevelCount() == 0 && aggregateMetric.getDuration() == 100
})
(statsComputed ? 1 : 0) * writer.finishBucket() >> { latch.countDown() }
@@ -261,6 +271,8 @@ class ConflatingMetricAggregatorTest extends DDSpecification {
features.supportsMetrics() >> true
features.peerTags() >>> [["country"], ["country", "georegion"],]
ConflatingMetricsAggregator aggregator = new ConflatingMetricsAggregator(empty,
+ [] as Set,
+ 100,
features, HealthMetrics.NO_OP, sink, writer, 10, queueSize, reportingInterval, SECONDS, false)
aggregator.start()
@@ -293,7 +305,7 @@ class ConflatingMetricAggregatorTest extends DDSpecification {
null,
null,
null
- ), { AggregateMetric aggregateMetric ->
+ , null), { AggregateMetric aggregateMetric ->
aggregateMetric.getHitCount() == 1 && aggregateMetric.getTopLevelCount() == 0 && aggregateMetric.getDuration() == 100
})
1 * writer.add(
@@ -311,7 +323,7 @@ class ConflatingMetricAggregatorTest extends DDSpecification {
null,
null,
null
- ), { AggregateMetric aggregateMetric ->
+ , null), { AggregateMetric aggregateMetric ->
aggregateMetric.getHitCount() == 1 && aggregateMetric.getTopLevelCount() == 0 && aggregateMetric.getDuration() == 100
})
1 * writer.finishBucket() >> { latch.countDown() }
@@ -328,6 +340,8 @@ class ConflatingMetricAggregatorTest extends DDSpecification {
features.supportsMetrics() >> true
features.peerTags() >> ["peer.hostname", "_dd.base_service"]
ConflatingMetricsAggregator aggregator = new ConflatingMetricsAggregator(empty,
+ [] as Set,
+ 100,
features, HealthMetrics.NO_OP, sink, writer, 10, queueSize, reportingInterval, SECONDS, false)
aggregator.start()
@@ -358,7 +372,7 @@ class ConflatingMetricAggregatorTest extends DDSpecification {
null,
null,
null
- ), { AggregateMetric aggregateMetric ->
+ , null), { AggregateMetric aggregateMetric ->
aggregateMetric.getHitCount() == 1 && aggregateMetric.getTopLevelCount() == 0 && aggregateMetric.getDuration() == 100
})
1 * writer.finishBucket() >> { latch.countDown() }
@@ -380,8 +394,8 @@ class ConflatingMetricAggregatorTest extends DDSpecification {
DDAgentFeaturesDiscovery features = Mock(DDAgentFeaturesDiscovery)
features.supportsMetrics() >> true
features.peerTags() >> []
- ConflatingMetricsAggregator aggregator = new ConflatingMetricsAggregator(empty, features, HealthMetrics.NO_OP,
- sink, writer, 10, queueSize, reportingInterval, SECONDS, false)
+ ConflatingMetricsAggregator aggregator = new ConflatingMetricsAggregator(empty, [] as Set, 100, features, HealthMetrics.NO_OP,
+ sink, writer, 10, queueSize, reportingInterval, SECONDS, false)
aggregator.start()
when:
@@ -410,7 +424,7 @@ class ConflatingMetricAggregatorTest extends DDSpecification {
null,
null,
null
- ), { AggregateMetric value ->
+ , null), { AggregateMetric value ->
value.getHitCount() == 1 && value.getTopLevelCount() == topLevelCount && value.getDuration() == 100
})
1 * writer.finishBucket() >> { latch.countDown() }
@@ -433,6 +447,8 @@ class ConflatingMetricAggregatorTest extends DDSpecification {
features.supportsMetrics() >> true
features.peerTags() >> []
ConflatingMetricsAggregator aggregator = new ConflatingMetricsAggregator(empty,
+ [] as Set,
+ 100,
features, HealthMetrics.NO_OP, sink, writer, 10, queueSize, reportingInterval, SECONDS, false)
long duration = 100
List trace = [
@@ -469,7 +485,7 @@ class ConflatingMetricAggregatorTest extends DDSpecification {
null,
null,
null
- ), { AggregateMetric value ->
+ , null), { AggregateMetric value ->
value.getHitCount() == count && value.getDuration() == count * duration
})
1 * writer.add(new MetricKey(
@@ -486,7 +502,7 @@ class ConflatingMetricAggregatorTest extends DDSpecification {
null,
null,
null
- ), { AggregateMetric value ->
+ , null), { AggregateMetric value ->
value.getHitCount() == count && value.getDuration() == count * duration * 2
})
@@ -505,6 +521,8 @@ class ConflatingMetricAggregatorTest extends DDSpecification {
features.supportsMetrics() >> true
features.peerTags() >> []
ConflatingMetricsAggregator aggregator = new ConflatingMetricsAggregator(empty,
+ [] as Set,
+ 100,
features, HealthMetrics.NO_OP, sink, writer, 10, queueSize, reportingInterval, SECONDS, true)
aggregator.start()
@@ -540,7 +558,7 @@ class ConflatingMetricAggregatorTest extends DDSpecification {
"GET",
"/api/users/:id",
null
- ), { AggregateMetric value ->
+ , null), { AggregateMetric value ->
value.getHitCount() == count && value.getDuration() == count * duration
})
1 * writer.finishBucket() >> { latch.countDown() }
@@ -581,7 +599,7 @@ class ConflatingMetricAggregatorTest extends DDSpecification {
"GET",
"/api/users/:id",
null
- ), { AggregateMetric value ->
+ , null), { AggregateMetric value ->
value.getHitCount() == 1 && value.getDuration() == duration
})
1 * writer.add(new MetricKey(
@@ -598,7 +616,7 @@ class ConflatingMetricAggregatorTest extends DDSpecification {
"GET",
"/api/orders/:id",
null
- ), { AggregateMetric value ->
+ , null), { AggregateMetric value ->
value.getHitCount() == 1 && value.getDuration() == duration * 2
})
1 * writer.add(new MetricKey(
@@ -615,7 +633,7 @@ class ConflatingMetricAggregatorTest extends DDSpecification {
"POST",
"/api/users/:id",
null
- ), { AggregateMetric value ->
+ , null), { AggregateMetric value ->
value.getHitCount() == 1 && value.getDuration() == duration * 3
})
1 * writer.finishBucket() >> { latch2.countDown() }
@@ -632,6 +650,8 @@ class ConflatingMetricAggregatorTest extends DDSpecification {
features.supportsMetrics() >> true
features.peerTags() >> []
ConflatingMetricsAggregator aggregator = new ConflatingMetricsAggregator(empty,
+ [] as Set,
+ 100,
features, HealthMetrics.NO_OP, sink, writer, 10, queueSize, reportingInterval, SECONDS, true)
aggregator.start()
@@ -679,7 +699,7 @@ class ConflatingMetricAggregatorTest extends DDSpecification {
"GET",
"/api/users/:id",
null
- ), { AggregateMetric value ->
+ , null), { AggregateMetric value ->
value.getHitCount() == 1 && value.getDuration() == duration
})
1 * writer.add(new MetricKey(
@@ -696,7 +716,7 @@ class ConflatingMetricAggregatorTest extends DDSpecification {
"POST",
"/api/users/:id",
null
- ), { AggregateMetric value ->
+ , null), { AggregateMetric value ->
value.getHitCount() == 1 && value.getDuration() == duration * 2
})
1 * writer.add(new MetricKey(
@@ -713,7 +733,7 @@ class ConflatingMetricAggregatorTest extends DDSpecification {
"GET",
"/api/users/:id",
null
- ), { AggregateMetric value ->
+ , null), { AggregateMetric value ->
value.getHitCount() == 1 && value.getDuration() == duration * 3
})
1 * writer.add(new MetricKey(
@@ -730,7 +750,7 @@ class ConflatingMetricAggregatorTest extends DDSpecification {
"GET",
"/api/orders/:id",
null
- ), { AggregateMetric value ->
+ , null), { AggregateMetric value ->
value.getHitCount() == 1 && value.getDuration() == duration * 4
})
1 * writer.finishBucket() >> { latch.countDown() }
@@ -747,6 +767,8 @@ class ConflatingMetricAggregatorTest extends DDSpecification {
features.supportsMetrics() >> true
features.peerTags() >> []
ConflatingMetricsAggregator aggregator = new ConflatingMetricsAggregator(empty,
+ [] as Set,
+ 100,
features, HealthMetrics.NO_OP, sink, writer, 10, queueSize, reportingInterval, SECONDS, true)
aggregator.start()
@@ -783,7 +805,7 @@ class ConflatingMetricAggregatorTest extends DDSpecification {
null,
null,
null
- ), { AggregateMetric value ->
+ , null), { AggregateMetric value ->
value.getHitCount() == 1 && value.getDuration() == duration
})
1 * writer.add(new MetricKey(
@@ -800,7 +822,7 @@ class ConflatingMetricAggregatorTest extends DDSpecification {
"GET",
"/api/users/:id",
null
- ), { AggregateMetric value ->
+ , null), { AggregateMetric value ->
value.getHitCount() == 1 && value.getDuration() == duration * 2
})
1 * writer.finishBucket() >> { latch.countDown() }
@@ -817,6 +839,8 @@ class ConflatingMetricAggregatorTest extends DDSpecification {
features.supportsMetrics() >> true
features.peerTags() >> []
ConflatingMetricsAggregator aggregator = new ConflatingMetricsAggregator(empty,
+ [] as Set,
+ 100,
features, HealthMetrics.NO_OP, sink, writer, 10, queueSize, reportingInterval, SECONDS, false)
aggregator.start()
@@ -851,7 +875,7 @@ class ConflatingMetricAggregatorTest extends DDSpecification {
null,
null,
null
- ), { AggregateMetric value ->
+ , null), { AggregateMetric value ->
value.getHitCount() == 2 && value.getDuration() == 2 * duration
})
1 * writer.add(new MetricKey(
@@ -868,7 +892,7 @@ class ConflatingMetricAggregatorTest extends DDSpecification {
null,
null,
null
- ), { AggregateMetric value ->
+ , null), { AggregateMetric value ->
value.getHitCount() == 1 && value.getDuration() == duration
})
1 * writer.finishBucket() >> { latch.countDown() }
@@ -886,6 +910,8 @@ class ConflatingMetricAggregatorTest extends DDSpecification {
features.supportsMetrics() >> true
features.peerTags() >> []
ConflatingMetricsAggregator aggregator = new ConflatingMetricsAggregator(empty,
+ [] as Set,
+ 100,
features, HealthMetrics.NO_OP, sink, writer, maxAggregates, queueSize, reportingInterval, SECONDS, false)
long duration = 100
aggregator.start()
@@ -919,7 +945,7 @@ class ConflatingMetricAggregatorTest extends DDSpecification {
null,
null,
null
- ), _) >> { MetricKey key, AggregateMetric value ->
+ , null), _) >> { MetricKey key, AggregateMetric value ->
value.getHitCount() == 1 && value.getDuration() == duration
}
}
@@ -937,7 +963,7 @@ class ConflatingMetricAggregatorTest extends DDSpecification {
null,
null,
null
- ), _)
+ , null), _)
1 * writer.finishBucket() >> { latch.countDown() }
cleanup:
@@ -954,6 +980,8 @@ class ConflatingMetricAggregatorTest extends DDSpecification {
features.peerTags() >> []
HealthMetrics healthMetrics = Mock(HealthMetrics)
ConflatingMetricsAggregator aggregator = new ConflatingMetricsAggregator(empty,
+ [] as Set,
+ 100,
features, healthMetrics, sink, writer, maxAggregates, queueSize, reportingInterval, SECONDS, false)
long duration = 100
aggregator.start()
@@ -988,6 +1016,8 @@ class ConflatingMetricAggregatorTest extends DDSpecification {
features.peerTags() >> []
HealthMetrics healthMetrics = Mock(HealthMetrics)
ConflatingMetricsAggregator aggregator = new ConflatingMetricsAggregator(empty,
+ [] as Set,
+ 100,
features, healthMetrics, sink, writer, maxAggregates, queueSize, reportingInterval, SECONDS, false)
aggregator.start()
@@ -1033,6 +1063,8 @@ class ConflatingMetricAggregatorTest extends DDSpecification {
features.supportsMetrics() >> true
features.peerTags() >> []
ConflatingMetricsAggregator aggregator = new ConflatingMetricsAggregator(empty,
+ [] as Set,
+ 100,
features, HealthMetrics.NO_OP, sink, writer, maxAggregates, queueSize, reportingInterval, SECONDS, false)
long duration = 100
aggregator.start()
@@ -1066,7 +1098,7 @@ class ConflatingMetricAggregatorTest extends DDSpecification {
null,
null,
null
- ), { AggregateMetric value ->
+ , null), { AggregateMetric value ->
value.getHitCount() == 1 && value.getDuration() == duration
})
}
@@ -1101,7 +1133,7 @@ class ConflatingMetricAggregatorTest extends DDSpecification {
null,
null,
null
- ), { AggregateMetric value ->
+ , null), { AggregateMetric value ->
value.getHitCount() == 1 && value.getDuration() == duration
})
}
@@ -1119,7 +1151,7 @@ class ConflatingMetricAggregatorTest extends DDSpecification {
null,
null,
null
- ), _)
+ , null), _)
1 * writer.finishBucket() >> { latch.countDown() }
cleanup:
@@ -1135,6 +1167,8 @@ class ConflatingMetricAggregatorTest extends DDSpecification {
features.supportsMetrics() >> true
features.peerTags() >> []
ConflatingMetricsAggregator aggregator = new ConflatingMetricsAggregator(empty,
+ [] as Set,
+ 100,
features, HealthMetrics.NO_OP, sink, writer, maxAggregates, queueSize, reportingInterval, SECONDS, false)
long duration = 100
aggregator.start()
@@ -1168,7 +1202,7 @@ class ConflatingMetricAggregatorTest extends DDSpecification {
null,
null,
null
- ), { AggregateMetric value ->
+ , null), { AggregateMetric value ->
value.getHitCount() == 1 && value.getDuration() == duration
})
}
@@ -1195,6 +1229,8 @@ class ConflatingMetricAggregatorTest extends DDSpecification {
features.supportsMetrics() >> true
features.peerTags() >> []
ConflatingMetricsAggregator aggregator = new ConflatingMetricsAggregator(empty,
+ [] as Set,
+ 100,
features, HealthMetrics.NO_OP, sink, writer, maxAggregates, queueSize, 1, SECONDS, false)
long duration = 100
aggregator.start()
@@ -1227,7 +1263,7 @@ class ConflatingMetricAggregatorTest extends DDSpecification {
null,
null,
null
- ), { AggregateMetric value ->
+ , null), { AggregateMetric value ->
value.getHitCount() == 1 && value.getDuration() == duration
})
}
@@ -1246,6 +1282,8 @@ class ConflatingMetricAggregatorTest extends DDSpecification {
features.supportsMetrics() >> true
features.peerTags() >> []
ConflatingMetricsAggregator aggregator = new ConflatingMetricsAggregator(empty,
+ [] as Set,
+ 100,
features, HealthMetrics.NO_OP, sink, writer, maxAggregates, queueSize, 1, SECONDS, false)
long duration = 100
aggregator.start()
@@ -1277,6 +1315,8 @@ class ConflatingMetricAggregatorTest extends DDSpecification {
Sink sink = Stub(Sink)
DDAgentFeaturesDiscovery features = Mock(DDAgentFeaturesDiscovery)
ConflatingMetricsAggregator aggregator = new ConflatingMetricsAggregator(empty,
+ [] as Set,
+ 100,
features, HealthMetrics.NO_OP, sink, writer, maxAggregates, queueSize, 1, SECONDS, false)
aggregator.start()
@@ -1299,6 +1339,8 @@ class ConflatingMetricAggregatorTest extends DDSpecification {
features.supportsMetrics() >> false
features.peerTags() >> []
ConflatingMetricsAggregator aggregator = new ConflatingMetricsAggregator(empty,
+ [] as Set,
+ 100,
features, HealthMetrics.NO_OP, sink, writer, 10, queueSize, 200, MILLISECONDS, false)
final spans = [
new SimpleSpan("service", "operation", "resource", "type", false, true, false, 0, 10, HTTP_OK)
@@ -1331,6 +1373,8 @@ class ConflatingMetricAggregatorTest extends DDSpecification {
DDAgentFeaturesDiscovery features = Mock(DDAgentFeaturesDiscovery)
features.supportsMetrics() >> true
ConflatingMetricsAggregator aggregator = new ConflatingMetricsAggregator(empty,
+ [] as Set,
+ 100,
features, HealthMetrics.NO_OP, sink, writer, maxAggregates, queueSize, 1, SECONDS, false)
when:
@@ -1364,6 +1408,8 @@ class ConflatingMetricAggregatorTest extends DDSpecification {
DDAgentFeaturesDiscovery features = Mock(DDAgentFeaturesDiscovery)
features.supportsMetrics() >> true
ConflatingMetricsAggregator aggregator = new ConflatingMetricsAggregator(empty,
+ [] as Set,
+ 100,
features, HealthMetrics.NO_OP, sink, writer, 10, queueSize, reportingInterval, SECONDS, false)
aggregator.start()
@@ -1394,7 +1440,7 @@ class ConflatingMetricAggregatorTest extends DDSpecification {
null,
null,
null
- ), { AggregateMetric aggregateMetric ->
+ , null), { AggregateMetric aggregateMetric ->
aggregateMetric.getHitCount() == 1 && aggregateMetric.getTopLevelCount() == 1 && aggregateMetric.getDuration() == 100
})
1 * writer.finishBucket() >> { latch.countDown() }
@@ -1411,6 +1457,8 @@ class ConflatingMetricAggregatorTest extends DDSpecification {
features.supportsMetrics() >> true
features.peerTags() >> []
ConflatingMetricsAggregator aggregator = new ConflatingMetricsAggregator(empty,
+ [] as Set,
+ 100,
features, HealthMetrics.NO_OP, sink, writer, 10, queueSize, reportingInterval, SECONDS, false)
aggregator.start()
@@ -1449,7 +1497,7 @@ class ConflatingMetricAggregatorTest extends DDSpecification {
null,
null,
null
- ), { AggregateMetric aggregateMetric ->
+ , null), { AggregateMetric aggregateMetric ->
aggregateMetric.getHitCount() == 3 && aggregateMetric.getTopLevelCount() == 3 && aggregateMetric.getDuration() == 450
})
1 * writer.finishBucket() >> { latch.countDown() }
@@ -1466,6 +1514,8 @@ class ConflatingMetricAggregatorTest extends DDSpecification {
features.supportsMetrics() >> true
features.peerTags() >> []
ConflatingMetricsAggregator aggregator = new ConflatingMetricsAggregator(empty,
+ [] as Set,
+ 100,
features, HealthMetrics.NO_OP, sink, writer, 10, queueSize, reportingInterval, SECONDS, true)
aggregator.start()
@@ -1504,7 +1554,7 @@ class ConflatingMetricAggregatorTest extends DDSpecification {
"GET",
"/api/users/:id",
null
- ), { AggregateMetric aggregateMetric ->
+ , null), { AggregateMetric aggregateMetric ->
aggregateMetric.getHitCount() == 1 && aggregateMetric.getTopLevelCount() == 1 && aggregateMetric.getDuration() == 100
})
1 * writer.add(
@@ -1522,7 +1572,7 @@ class ConflatingMetricAggregatorTest extends DDSpecification {
"POST",
"/api/orders",
null
- ), { AggregateMetric aggregateMetric ->
+ , null), { AggregateMetric aggregateMetric ->
aggregateMetric.getHitCount() == 1 && aggregateMetric.getTopLevelCount() == 1 && aggregateMetric.getDuration() == 200
})
1 * writer.add(
@@ -1540,7 +1590,7 @@ class ConflatingMetricAggregatorTest extends DDSpecification {
null,
null,
null
- ), { AggregateMetric aggregateMetric ->
+ , null), { AggregateMetric aggregateMetric ->
aggregateMetric.getHitCount() == 1 && aggregateMetric.getTopLevelCount() == 1 && aggregateMetric.getDuration() == 150
})
1 * writer.finishBucket() >> { latch.countDown() }
@@ -1557,6 +1607,8 @@ class ConflatingMetricAggregatorTest extends DDSpecification {
features.supportsMetrics() >> true
features.peerTags() >> []
ConflatingMetricsAggregator aggregator = new ConflatingMetricsAggregator(empty,
+ [] as Set,
+ 100,
features, HealthMetrics.NO_OP, sink, writer, 10, queueSize, reportingInterval, SECONDS, false)
aggregator.start()
@@ -1592,7 +1644,7 @@ class ConflatingMetricAggregatorTest extends DDSpecification {
null,
null,
"0"
- ), _)
+ , null), _)
1 * writer.add(new MetricKey(
"grpc.service/Method",
"service",
@@ -1607,7 +1659,7 @@ class ConflatingMetricAggregatorTest extends DDSpecification {
null,
null,
"5"
- ), _)
+ , null), _)
1 * writer.add(new MetricKey(
"GET /api",
"service",
@@ -1622,7 +1674,7 @@ class ConflatingMetricAggregatorTest extends DDSpecification {
null,
null,
null
- ), _)
+ , null), _)
1 * writer.finishBucket() >> { latch.countDown() }
cleanup:
diff --git a/dd-trace-core/src/test/groovy/datadog/trace/common/metrics/FootprintForkedTest.groovy b/dd-trace-core/src/test/groovy/datadog/trace/common/metrics/FootprintForkedTest.groovy
index eceedeb1935..07f18997f0b 100644
--- a/dd-trace-core/src/test/groovy/datadog/trace/common/metrics/FootprintForkedTest.groovy
+++ b/dd-trace-core/src/test/groovy/datadog/trace/common/metrics/FootprintForkedTest.groovy
@@ -40,6 +40,8 @@ class FootprintForkedTest extends DDSpecification {
ConflatingMetricsAggregator aggregator = new ConflatingMetricsAggregator(
new WellKnownTags("runtimeid","hostname", "env", "service", "version","language"),
[].toSet() as Set,
+ [] as Set,
+ 100,
features,
HealthMetrics.NO_OP,
sink,
diff --git a/dd-trace-core/src/test/groovy/datadog/trace/common/metrics/SerializingMetricWriterTest.groovy b/dd-trace-core/src/test/groovy/datadog/trace/common/metrics/SerializingMetricWriterTest.groovy
index 3ff81de9851..bda4aa9b654 100644
--- a/dd-trace-core/src/test/groovy/datadog/trace/common/metrics/SerializingMetricWriterTest.groovy
+++ b/dd-trace-core/src/test/groovy/datadog/trace/common/metrics/SerializingMetricWriterTest.groovy
@@ -74,7 +74,7 @@ class SerializingMetricWriterTest extends DDSpecification {
null,
null,
null
- ),
+ , null),
new AggregateMetric().recordDurations(10, new AtomicLongArray(1L))
),
Pair.of(
@@ -96,7 +96,7 @@ class SerializingMetricWriterTest extends DDSpecification {
null,
null,
null
- ),
+ , null),
new AggregateMetric().recordDurations(9, new AtomicLongArray(1L))
),
Pair.of(
@@ -114,7 +114,7 @@ class SerializingMetricWriterTest extends DDSpecification {
"GET",
"/api/users/:id",
null
- ),
+ , null),
new AggregateMetric().recordDurations(5, new AtomicLongArray(1L))
)
],
@@ -134,7 +134,7 @@ class SerializingMetricWriterTest extends DDSpecification {
null,
null,
null
- ),
+ , null),
new AggregateMetric().recordDurations(10, new AtomicLongArray(1L))
)
})
@@ -149,8 +149,8 @@ class SerializingMetricWriterTest extends DDSpecification {
WellKnownTags wellKnownTags = new WellKnownTags("runtimeid", "hostname", "env", "service", "version", "language")
// Create keys with different combinations of HTTP fields
- def keyWithNoSource = new MetricKey("resource", "service", "operation", null, "type", 200, false, false, "server", [], "GET", "/api/users", null)
- def keyWithSource = new MetricKey("resource", "service", "operation", "source", "type", 200, false, false, "server", [], "POST", null, null)
+ def keyWithNoSource = new MetricKey("resource", "service", "operation", null, "type", 200, false, false, "server", [], "GET", "/api/users", null, null)
+ def keyWithSource = new MetricKey("resource", "service", "operation", "source", "type", 200, false, false, "server", [], "POST", null, null, null)
def content = [
Pair.of(keyWithNoSource, new AggregateMetric().recordDurations(1, new AtomicLongArray(1L))),
@@ -178,10 +178,10 @@ class SerializingMetricWriterTest extends DDSpecification {
WellKnownTags wellKnownTags = new WellKnownTags("runtimeid", "hostname", "env", "service", "version", "language")
// Create keys with different combinations of HTTP fields
- def keyWithBoth = new MetricKey("resource", "service", "operation", null, "type", 200, false, false, "server", [], "GET", "/api/users", null)
- def keyWithMethodOnly = new MetricKey("resource", "service", "operation", null, "type", 200, false, false, "server", [], "POST", null,null)
- def keyWithEndpointOnly = new MetricKey("resource", "service", "operation", null, "type", 200, false, false, "server", [], null, "/api/orders",null)
- def keyWithNeither = new MetricKey("resource", "service", "operation", null, "type", 200, false, false, "client", [], null, null, null)
+ def keyWithBoth = new MetricKey("resource", "service", "operation", null, "type", 200, false, false, "server", [], "GET", "/api/users", null, null)
+ def keyWithMethodOnly = new MetricKey("resource", "service", "operation", null, "type", 200, false, false, "server", [], "POST", null,null, null)
+ def keyWithEndpointOnly = new MetricKey("resource", "service", "operation", null, "type", 200, false, false, "server", [], null, "/api/orders",null, null)
+ def keyWithNeither = new MetricKey("resource", "service", "operation", null, "type", 200, false, false, "client", [], null, null, null, null)
def content = [
Pair.of(keyWithBoth, new AggregateMetric().recordDurations(1, new AtomicLongArray(1L))),
@@ -217,7 +217,7 @@ class SerializingMetricWriterTest extends DDSpecification {
WellKnownTags wellKnownTags = new WellKnownTags("runtimeid", "hostname", "env", "service", "version", "language")
// Create keys with different combinations of HTTP fields
- def key = new MetricKey("resource", "service", "operation", null, "type", 200, false, false, "server", [], "GET", "/api/users", null)
+ def key = new MetricKey("resource", "service", "operation", null, "type", 200, false, false, "server", [], "GET", "/api/users", null, null)
def content = [Pair.of(key, new AggregateMetric().recordDurations(1, new AtomicLongArray(1L))),]
@@ -307,7 +307,8 @@ class SerializingMetricWriterTest extends DDSpecification {
boolean hasHttpEndpoint = key.getHttpEndpoint() != null
boolean hasServiceSource = key.getServiceSource() != null
boolean hasGrpcStatusCode = key.getGrpcStatusCode() != null
- int expectedMapSize = 15 + (hasServiceSource ? 1 : 0) + (hasHttpMethod ? 1 : 0) + (hasHttpEndpoint ? 1 : 0) + (hasGrpcStatusCode ? 1 : 0)
+ boolean hasAdditionalTags = key.getAdditionalTags().size() > 0
+ int expectedMapSize = 15 + (hasServiceSource ? 1 : 0) + (hasHttpMethod ? 1 : 0) + (hasHttpEndpoint ? 1 : 0) + (hasGrpcStatusCode ? 1 : 0) + (hasAdditionalTags ? 1 : 0)
assert metricMapSize == expectedMapSize
int elementCount = 0
assert unpacker.unpackString() == "Name"
@@ -342,6 +343,16 @@ class SerializingMetricWriterTest extends DDSpecification {
assert unpackedPeerTag == key.getPeerTags()[i].toString()
}
++elementCount
+ if (hasAdditionalTags) {
+ assert unpacker.unpackString() == "AdditionalMetricTags"
+ int additionalTagsLength = unpacker.unpackArrayHeader()
+ assert additionalTagsLength == key.getAdditionalTags().size()
+ for (int i = 0; i < additionalTagsLength; i++) {
+ def unpackedTag = unpacker.unpackString()
+ assert unpackedTag == key.getAdditionalTags()[i].toString()
+ }
+ ++elementCount
+ }
// Service source is only present when the service name has been overridden by the tracer
if (hasServiceSource) {
assert unpacker.unpackString() == "srv_src"
@@ -405,8 +416,8 @@ class SerializingMetricWriterTest extends DDSpecification {
WellKnownTags wellKnownTags = new WellKnownTags("runtimeid", "hostname", "env", "service", "version", "language")
// Create keys with different combinations of HTTP fields
- def keyWithNoSource = new MetricKey("resource", "service", "operation", null, "type", 200, false, false, "server", [], "GET", "/api/users", null)
- def keyWithSource = new MetricKey("resource", "service", "operation", "source", "type", 200, false, false, "server", [], "POST", null, null)
+ def keyWithNoSource = new MetricKey("resource", "service", "operation", null, "type", 200, false, false, "server", [], "GET", "/api/users", null, null)
+ def keyWithSource = new MetricKey("resource", "service", "operation", "source", "type", 200, false, false, "server", [], "POST", null, null, null)
def content = [
Pair.of(keyWithNoSource, new AggregateMetric().recordDurations(1, new AtomicLongArray(1L))),
@@ -433,9 +444,9 @@ class SerializingMetricWriterTest extends DDSpecification {
long duration = SECONDS.toNanos(10)
WellKnownTags wellKnownTags = new WellKnownTags("runtimeid", "hostname", "env", "service", "version", "language")
- def keyWithGrpc = new MetricKey("grpc.service/Method", "grpc-service", "grpc.server", null, "rpc", 0, false, false, "server", [], null, null, "OK")
- def keyWithGrpcError = new MetricKey("grpc.service/Method", "grpc-service", "grpc.server", null, "rpc", 0, false, false, "client", [], null, null, "NOT_FOUND")
- def keyWithoutGrpc = new MetricKey("resource", "service", "operation", null, "web", 200, false, false, "server", [], null, null, null)
+ def keyWithGrpc = new MetricKey("grpc.service/Method", "grpc-service", "grpc.server", null, "rpc", 0, false, false, "server", [], null, null, "OK", null)
+ def keyWithGrpcError = new MetricKey("grpc.service/Method", "grpc-service", "grpc.server", null, "rpc", 0, false, false, "client", [], null, null, "NOT_FOUND", null)
+ def keyWithoutGrpc = new MetricKey("resource", "service", "operation", null, "web", 200, false, false, "server", [], null, null, null, null)
def content = [
Pair.of(keyWithGrpc, new AggregateMetric().recordDurations(1, new AtomicLongArray(1L))),
@@ -464,10 +475,10 @@ class SerializingMetricWriterTest extends DDSpecification {
WellKnownTags wellKnownTags = new WellKnownTags("runtimeid", "hostname", "env", "service", "version", "language")
// Create keys with different combinations of HTTP fields
- def keyWithBoth = new MetricKey("resource", "service", "operation", null, "type", 200, false, false, "server", [], "GET", "/api/users", null)
- def keyWithMethodOnly = new MetricKey("resource", "service", "operation", null, "type", 200, false, false, "server", [], "POST", null, null)
- def keyWithEndpointOnly = new MetricKey("resource", "service", "operation", null, "type", 200, false, false, "server", [], null, "/api/orders", null)
- def keyWithNeither = new MetricKey("resource", "service", "operation", null, "type", 200, false, false, "client", [], null, null, null)
+ def keyWithBoth = new MetricKey("resource", "service", "operation", null, "type", 200, false, false, "server", [], "GET", "/api/users", null, null)
+ def keyWithMethodOnly = new MetricKey("resource", "service", "operation", null, "type", 200, false, false, "server", [], "POST", null, null, null)
+ def keyWithEndpointOnly = new MetricKey("resource", "service", "operation", null, "type", 200, false, false, "server", [], null, "/api/orders", null, null)
+ def keyWithNeither = new MetricKey("resource", "service", "operation", null, "type", 200, false, false, "client", [], null, null, null, null)
def content = [
Pair.of(keyWithBoth, new AggregateMetric().recordDurations(1, new AtomicLongArray(1L))),
diff --git a/dd-trace-core/src/test/java/datadog/trace/common/metrics/AdditionalTagsCardinalityLimiterTest.java b/dd-trace-core/src/test/java/datadog/trace/common/metrics/AdditionalTagsCardinalityLimiterTest.java
new file mode 100644
index 00000000000..f15596fca2e
--- /dev/null
+++ b/dd-trace-core/src/test/java/datadog/trace/common/metrics/AdditionalTagsCardinalityLimiterTest.java
@@ -0,0 +1,122 @@
+package datadog.trace.common.metrics;
+
+import static datadog.trace.common.metrics.AdditionalTagsCardinalityLimiter.BLOCKED_VALUE;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotEquals;
+
+import datadog.trace.core.monitor.HealthMetrics;
+import java.util.ArrayList;
+import java.util.List;
+import org.junit.jupiter.api.Test;
+
+class AdditionalTagsCardinalityLimiterTest {
+
+ @Test
+ void belowLimitAdmitsAllValues() {
+ AdditionalTagsCardinalityLimiter limiter =
+ new AdditionalTagsCardinalityLimiter(100, HealthMetrics.NO_OP);
+ for (int i = 0; i < 99; i++) {
+ String v = "v" + i;
+ assertEquals(v, limiter.admitOrBlock("region", v));
+ }
+ }
+
+ @Test
+ void atLimitNextNewValueIsBlocked() {
+ AdditionalTagsCardinalityLimiter limiter =
+ new AdditionalTagsCardinalityLimiter(3, HealthMetrics.NO_OP);
+ assertEquals("a", limiter.admitOrBlock("region", "a"));
+ assertEquals("b", limiter.admitOrBlock("region", "b"));
+ assertEquals("c", limiter.admitOrBlock("region", "c"));
+ assertEquals(BLOCKED_VALUE, limiter.admitOrBlock("region", "d"));
+ }
+
+ @Test
+ void alreadyAdmittedValueStaysAdmittedAfterCapHit() {
+ AdditionalTagsCardinalityLimiter limiter =
+ new AdditionalTagsCardinalityLimiter(3, HealthMetrics.NO_OP);
+ limiter.admitOrBlock("region", "a");
+ limiter.admitOrBlock("region", "b");
+ limiter.admitOrBlock("region", "c");
+ limiter.admitOrBlock("region", "d"); // blocked
+ assertEquals("a", limiter.admitOrBlock("region", "a"));
+ assertEquals("b", limiter.admitOrBlock("region", "b"));
+ assertNotEquals(BLOCKED_VALUE, limiter.admitOrBlock("region", "c"));
+ }
+
+ @Test
+ void differentTagsAreIndependent() {
+ AdditionalTagsCardinalityLimiter limiter =
+ new AdditionalTagsCardinalityLimiter(2, HealthMetrics.NO_OP);
+ limiter.admitOrBlock("customer_id", "x");
+ limiter.admitOrBlock("customer_id", "y");
+ assertEquals(BLOCKED_VALUE, limiter.admitOrBlock("customer_id", "z"));
+ // region should be completely unaffected
+ assertEquals("us-east-1", limiter.admitOrBlock("region", "us-east-1"));
+ assertEquals("eu-west-1", limiter.admitOrBlock("region", "eu-west-1"));
+ assertEquals(BLOCKED_VALUE, limiter.admitOrBlock("region", "ap-south-1"));
+ }
+
+ @Test
+ void resetReadmitsPreviouslyBlockedValues() {
+ AdditionalTagsCardinalityLimiter limiter =
+ new AdditionalTagsCardinalityLimiter(2, HealthMetrics.NO_OP);
+ limiter.admitOrBlock("region", "a");
+ limiter.admitOrBlock("region", "b");
+ assertEquals(BLOCKED_VALUE, limiter.admitOrBlock("region", "c"));
+ limiter.reset();
+ assertEquals("c", limiter.admitOrBlock("region", "c"));
+ }
+
+ @Test
+ void healthMetricFiresOnBlock() {
+ RecordingHealthMetrics health = new RecordingHealthMetrics();
+ AdditionalTagsCardinalityLimiter limiter = new AdditionalTagsCardinalityLimiter(2, health);
+ limiter.admitOrBlock("region", "a");
+ limiter.admitOrBlock("region", "b");
+ assertEquals(0, health.blocked.size());
+ limiter.admitOrBlock("region", "c"); // blocked
+ limiter.admitOrBlock("region", "d"); // blocked
+ assertEquals(2, health.blocked.size());
+ assertEquals("region", health.blocked.get(0));
+ assertEquals("region", health.blocked.get(1));
+ }
+
+ @Test
+ void noteBlockedDueToLengthFiresHealthMetric() {
+ RecordingHealthMetrics health = new RecordingHealthMetrics();
+ AdditionalTagsCardinalityLimiter limiter = new AdditionalTagsCardinalityLimiter(100, health);
+ limiter.noteBlockedDueToLength("region", 500, 250);
+ assertEquals(1, health.blocked.size());
+ assertEquals("region", health.blocked.get(0));
+ }
+
+ @Test
+ void lengthAndCardinalityBlocksAreCountedSeparatelyInHealth() {
+ RecordingHealthMetrics health = new RecordingHealthMetrics();
+ AdditionalTagsCardinalityLimiter limiter = new AdditionalTagsCardinalityLimiter(2, health);
+ // exhaust cardinality
+ limiter.admitOrBlock("region", "a");
+ limiter.admitOrBlock("region", "b");
+ limiter.admitOrBlock("region", "c"); // cardinality block -> 1 health event
+ // length block on same tag -> 2 health events total
+ limiter.noteBlockedDueToLength("region", 500, 250);
+ assertEquals(2, health.blocked.size());
+ // reset rearms both branches
+ limiter.reset();
+ limiter.admitOrBlock("region", "x");
+ limiter.admitOrBlock("region", "y");
+ limiter.admitOrBlock("region", "z"); // cardinality block again -> 3
+ limiter.noteBlockedDueToLength("region", 500, 250); // length block again -> 4
+ assertEquals(4, health.blocked.size());
+ }
+
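+ /** Captures the tag key reported for each blocked additional-tag value so tests can assert on it. */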
+ private static final class RecordingHealthMetrics extends HealthMetrics {
+ final List<String> blocked = new ArrayList<>();
+
+ @Override
+ public void onAdditionalTagValueCardinalityBlocked(String tagKey) {
+ blocked.add(tagKey);
+ }
+ }
+}
diff --git a/dd-trace-core/src/test/java/datadog/trace/common/metrics/ConflatingMetricsAggregatorNormalizationTest.java b/dd-trace-core/src/test/java/datadog/trace/common/metrics/ConflatingMetricsAggregatorNormalizationTest.java
new file mode 100644
index 00000000000..8453316a65a
--- /dev/null
+++ b/dd-trace-core/src/test/java/datadog/trace/common/metrics/ConflatingMetricsAggregatorNormalizationTest.java
@@ -0,0 +1,58 @@
+package datadog.trace.common.metrics;
+
+import static datadog.trace.common.metrics.ConflatingMetricsAggregator.MAX_ADDITIONAL_TAG_KEYS;
+import static datadog.trace.common.metrics.ConflatingMetricsAggregator.normalizeAdditionalTagKeys;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.LinkedHashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.TreeSet;
+import org.junit.jupiter.api.Test;
+
+class ConflatingMetricsAggregatorNormalizationTest {
+
+ @Test
+ void nullOrEmptyProducesEmptyList() {
+ assertEquals(Collections.emptyList(), normalizeAdditionalTagKeys(null));
+ assertEquals(
+ Collections.emptyList(), normalizeAdditionalTagKeys(Collections.emptySet()));
+ }
+
+ @Test
+ void resultIsSortedAlphabetically() {
+ Set<String> configured = new LinkedHashSet<>(Arrays.asList("region", "tenant_id", "az"));
+ assertEquals(
+ Arrays.asList("az", "region", "tenant_id"), normalizeAdditionalTagKeys(configured));
+ }
+
+ @Test
+ void resultIsImmutable() {
+ Set<String> configured = new LinkedHashSet<>(Arrays.asList("region", "tenant_id"));
+ List<String> normalized = normalizeAdditionalTagKeys(configured);
+ assertThrows(UnsupportedOperationException.class, () -> normalized.add("oops"));
+ }
+
+ @Test
+ void inputOrderDoesNotAffectResult() {
+ Set<String> a = new LinkedHashSet<>(Arrays.asList("region", "tenant_id"));
+ Set<String> b = new LinkedHashSet<>(Arrays.asList("tenant_id", "region"));
+ assertEquals(normalizeAdditionalTagKeys(a), normalizeAdditionalTagKeys(b));
+ }
+
+ @Test
+ void exceedingMaxKeysTruncatesAfterSort() {
+ Set<String> configured = new TreeSet<>();
+ for (int i = 0; i < MAX_ADDITIONAL_TAG_KEYS + 5; i++) {
+ configured.add(String.format("tag_%02d", i));
+ }
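+ // the keys sort lexicographically, so truncation keeps only the first MAX_ADDITIONAL_TAG_KEYS of them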
+ List<String> normalized = normalizeAdditionalTagKeys(configured);
+ assertEquals(MAX_ADDITIONAL_TAG_KEYS, normalized.size());
+ assertTrue(normalized.contains("tag_00"));
+ assertTrue(normalized.contains(String.format("tag_%02d", MAX_ADDITIONAL_TAG_KEYS - 1)));
+ }
+}
diff --git a/dd-trace-core/src/test/java/datadog/trace/common/metrics/MetricKeyAdditionalTagsTest.java b/dd-trace-core/src/test/java/datadog/trace/common/metrics/MetricKeyAdditionalTagsTest.java
new file mode 100644
index 00000000000..1cdd35f02bc
--- /dev/null
+++ b/dd-trace-core/src/test/java/datadog/trace/common/metrics/MetricKeyAdditionalTagsTest.java
@@ -0,0 +1,77 @@
+package datadog.trace.common.metrics;
+
+import static java.util.Collections.emptyList;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotEquals;
+
+import datadog.trace.bootstrap.instrumentation.api.UTF8BytesString;
+import java.util.ArrayList;
+import java.util.List;
+import org.junit.jupiter.api.Test;
+
+class MetricKeyAdditionalTagsTest {
+
+ @Test
+ void emptyAndNullAdditionalTagsAreEquivalent() {
+ MetricKey a = key(null);
+ MetricKey b = key(emptyList());
+ assertEquals(a, b);
+ assertEquals(a.hashCode(), b.hashCode());
+ assertEquals(emptyList(), a.getAdditionalTags());
+ }
+
+ @Test
+ void sameOrderProducesEqualKeys() {
+ MetricKey a = key(tags("region:us-east-1", "tenant_id:acme"));
+ MetricKey b = key(tags("region:us-east-1", "tenant_id:acme"));
+ assertEquals(a, b);
+ assertEquals(a.hashCode(), b.hashCode());
+ }
+
+ @Test
+ void differentValuesProduceDifferentKeys() {
+ MetricKey a = key(tags("region:us-east-1"));
+ MetricKey b = key(tags("region:eu-west-1"));
+ assertNotEquals(a, b);
+ }
+
+ @Test
+ void differentTagSetsProduceDifferentKeys() {
+ MetricKey a = key(tags("region:us-east-1"));
+ MetricKey b = key(tags("region:us-east-1", "tenant_id:acme"));
+ assertNotEquals(a, b);
+ }
+
+ @Test
+ void keyWithAdditionalTagsDiffersFromKeyWithout() {
+ MetricKey a = key(emptyList());
+ MetricKey b = key(tags("region:us-east-1"));
+ assertNotEquals(a, b);
+ }
+
+ private static List<UTF8BytesString> tags(String... entries) {
+ List<UTF8BytesString> list = new ArrayList<>(entries.length);
+ for (String e : entries) {
+ list.add(UTF8BytesString.create(e));
+ }
+ return list;
+ }
+
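+ /** Builds a MetricKey that varies only in its additional tags; every other dimension is fixed. */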
+ private static MetricKey key(List<UTF8BytesString> additionalTags) {
+ return new MetricKey(
+ "resource",
+ "service",
+ "operation",
+ null,
+ "web",
+ 200,
+ false,
+ false,
+ "server",
+ emptyList(),
+ null,
+ null,
+ null,
+ additionalTags);
+ }
+}
diff --git a/dd-trace-core/src/test/java/datadog/trace/common/metrics/SerializingMetricWriterAdditionalTagsTest.java b/dd-trace-core/src/test/java/datadog/trace/common/metrics/SerializingMetricWriterAdditionalTagsTest.java
new file mode 100644
index 00000000000..2733b986b33
--- /dev/null
+++ b/dd-trace-core/src/test/java/datadog/trace/common/metrics/SerializingMetricWriterAdditionalTagsTest.java
@@ -0,0 +1,132 @@
+package datadog.trace.common.metrics;
+
+import static java.util.Collections.emptyList;
+import static java.util.concurrent.TimeUnit.MILLISECONDS;
+import static java.util.concurrent.TimeUnit.SECONDS;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNull;
+
+import datadog.metrics.api.Histograms;
+import datadog.metrics.impl.DDSketchHistograms;
+import datadog.trace.api.WellKnownTags;
+import datadog.trace.bootstrap.instrumentation.api.UTF8BytesString;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicLongArray;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
+import org.msgpack.core.MessagePack;
+import org.msgpack.core.MessageUnpacker;
+
+class SerializingMetricWriterAdditionalTagsTest {
+
+ @BeforeAll
+ static void registerHistograms() {
+ Histograms.register(DDSketchHistograms.FACTORY);
+ }
+
+ @Test
+ void emptyAdditionalTagsOmitTheField() throws Exception {
+ List<String> emitted = roundTripAdditionalTags(emptyList());
+ assertNull(emitted);
+ }
+
+ @Test
+ void populatedAdditionalTagsAreEmittedInOrder() throws Exception {
+ List<UTF8BytesString> tags =
+ Arrays.asList(
+ UTF8BytesString.create("region:us-east-1"), UTF8BytesString.create("tenant_id:acme"));
+ List<String> emitted = roundTripAdditionalTags(tags);
+ assertEquals(Arrays.asList("region:us-east-1", "tenant_id:acme"), emitted);
+ }
+
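+ /**
+ * Writes a single stats bucket containing one metric keyed with the given additional tags, then
+ * parses the captured payload and returns the emitted AdditionalMetricTags entries (or null if
+ * the field was omitted).
+ */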
+ private List<String> roundTripAdditionalTags(List<UTF8BytesString> additionalTags)
+ throws Exception {
+ WellKnownTags wellKnownTags =
+ new WellKnownTags("runtimeid", "hostname", "env", "service", "version", "language");
+ CapturingSink sink = new CapturingSink();
+ SerializingMetricWriter writer = new SerializingMetricWriter(wellKnownTags, sink, 128);
+ MetricKey key =
+ new MetricKey(
+ "resource",
+ "service",
+ "operation",
+ null,
+ "web",
+ 200,
+ false,
+ false,
+ "server",
+ emptyList(),
+ null,
+ null,
+ null,
+ additionalTags);
+ AggregateMetric aggregate =
+ new AggregateMetric().recordDurations(1, new AtomicLongArray(new long[] {1L}));
+ long start = MILLISECONDS.toNanos(System.currentTimeMillis());
+ long duration = SECONDS.toNanos(10);
+ writer.startBucket(1, start, duration);
+ writer.add(key, aggregate);
+ writer.finishBucket();
+ assertNotNull(sink.captured);
+ return readAdditionalTagsFromPayload(sink.captured);
+ }
+
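+ /**
+ * Walks the serialized client-stats payload: the top-level map carries a "Stats" array of
+ * buckets, each bucket map carries its own "Stats" array of aggregated metrics, and each metric
+ * map may carry an "AdditionalMetricTags" string array. Returns that array, or null when absent.
+ */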
+ private static List<String> readAdditionalTagsFromPayload(ByteBuffer buffer) throws Exception {
+ MessageUnpacker unpacker = MessagePack.newDefaultUnpacker(buffer);
+ int envelopeSize = unpacker.unpackMapHeader();
+ for (int i = 0; i < envelopeSize; i++) {
+ String envelopeKey = unpacker.unpackString();
+ if ("Stats".equals(envelopeKey)) {
+ int bucketCount = unpacker.unpackArrayHeader();
+ assertEquals(1, bucketCount);
+ int bucketMapSize = unpacker.unpackMapHeader();
+ for (int b = 0; b < bucketMapSize; b++) {
+ String bk = unpacker.unpackString();
+ if ("Stats".equals(bk)) {
+ int statCount = unpacker.unpackArrayHeader();
+ assertEquals(1, statCount);
+ int metricMapSize = unpacker.unpackMapHeader();
+ for (int m = 0; m < metricMapSize; m++) {
+ String mk = unpacker.unpackString();
+ if ("AdditionalMetricTags".equals(mk)) {
+ int tagCount = unpacker.unpackArrayHeader();
+ List<String> result = new ArrayList<>(tagCount);
+ for (int t = 0; t < tagCount; t++) {
+ result.add(unpacker.unpackString());
+ }
+ return result;
+ } else {
+ unpacker.skipValue();
+ }
+ }
+ } else {
+ unpacker.skipValue();
+ }
+ }
+ } else {
+ unpacker.skipValue();
+ }
+ }
+ return null;
+ }
+
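+ /** Sink that keeps the last serialized payload in memory instead of shipping it anywhere. */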
+ private static final class CapturingSink implements Sink {
+ ByteBuffer captured;
+ final List<EventListener> listeners = new ArrayList<>();
+
+ @Override
+ public void register(EventListener listener) {
+ listeners.add(listener);
+ }
+
+ @Override
+ public void accept(int messageCount, ByteBuffer buffer) {
+ this.captured = buffer;
+ }
+ }
+}
diff --git a/dd-trace-core/src/traceAgentTest/groovy/MetricsIntegrationTest.groovy b/dd-trace-core/src/traceAgentTest/groovy/MetricsIntegrationTest.groovy
index 2972ffa2c18..ae6ec6b82eb 100644
--- a/dd-trace-core/src/traceAgentTest/groovy/MetricsIntegrationTest.groovy
+++ b/dd-trace-core/src/traceAgentTest/groovy/MetricsIntegrationTest.groovy
@@ -40,11 +40,11 @@ class MetricsIntegrationTest extends AbstractTraceAgentTest {
)
writer.startBucket(2, System.nanoTime(), SECONDS.toNanos(10))
writer.add(
- new MetricKey("resource1", "service1", "operation1", null, "sql", 0, false, true, "xyzzy", [UTF8BytesString.create("grault:quux")], null, null, null),
+ new MetricKey("resource1", "service1", "operation1", null, "sql", 0, false, true, "xyzzy", [UTF8BytesString.create("grault:quux")], null, null, null, null),
new AggregateMetric().recordDurations(5, new AtomicLongArray(2, 1, 2, 250, 4, 5))
)
writer.add(
- new MetricKey("resource2", "service2", "operation2", null, "web", 200, false, true, "xyzzy", [UTF8BytesString.create("grault:quux")], null, null, null),
+ new MetricKey("resource2", "service2", "operation2", null, "web", 200, false, true, "xyzzy", [UTF8BytesString.create("grault:quux")], null, null, null, null),
new AggregateMetric().recordDurations(10, new AtomicLongArray(1, 1, 200, 2, 3, 4, 5, 6, 7, 8, 9))
)
writer.finishBucket()
diff --git a/internal-api/src/main/java/datadog/trace/api/Config.java b/internal-api/src/main/java/datadog/trace/api/Config.java
index a463887f61a..cc3cace68cf 100644
--- a/internal-api/src/main/java/datadog/trace/api/Config.java
+++ b/internal-api/src/main/java/datadog/trace/api/Config.java
@@ -417,6 +417,8 @@
import static datadog.trace.api.config.GeneralConfig.TRACER_METRICS_MAX_PENDING;
import static datadog.trace.api.config.GeneralConfig.TRACE_DEBUG;
import static datadog.trace.api.config.GeneralConfig.TRACE_LOG_LEVEL;
+import static datadog.trace.api.config.GeneralConfig.TRACE_STATS_ADDITIONAL_TAGS;
+import static datadog.trace.api.config.GeneralConfig.TRACE_STATS_ADDITIONAL_TAGS_CARDINALITY_LIMIT;
import static datadog.trace.api.config.GeneralConfig.TRACE_STATS_COMPUTATION_ENABLED;
import static datadog.trace.api.config.GeneralConfig.TRACE_STATS_COMPUTATION_IGNORE_AGENT_VERSION;
import static datadog.trace.api.config.GeneralConfig.TRACE_TAGS;
@@ -5108,6 +5110,22 @@ public Set<String> getMetricsIgnoredResources() {
return tryMakeImmutableSet(configProvider.getList(TRACER_METRICS_IGNORED_RESOURCES));
}

+ public Set<String> getTraceStatsAdditionalTags() {
+ return tryMakeImmutableSet(configProvider.getList(TRACE_STATS_ADDITIONAL_TAGS));
+ }
+
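+ /** Per-tag distinct-value limit for additional stats tags; non-positive values fall back to the default of 100. */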
+ public int getTraceStatsAdditionalTagsCardinalityLimit() {
+ int configured = configProvider.getInteger(TRACE_STATS_ADDITIONAL_TAGS_CARDINALITY_LIMIT, 100);
+ if (configured <= 0) {
+ log.warn(
+ "Invalid {} value: {}; falling back to default of 100",
+ TRACE_STATS_ADDITIONAL_TAGS_CARDINALITY_LIMIT,
+ configured);
+ return 100;
+ }
+ return configured;
+ }
+
public String getEnv() {
// intentionally not thread safe
if (env == null) {
diff --git a/metadata/supported-configurations.json b/metadata/supported-configurations.json
index 8db93e05399..b1c9d4288a9 100644
--- a/metadata/supported-configurations.json
+++ b/metadata/supported-configurations.json
@@ -10577,6 +10577,22 @@
"aliases": []
}
],
+ "DD_TRACE_STATS_ADDITIONAL_TAGS": [
+ {
+ "version": "B",
+ "type": "array",
+ "default": null,
+ "aliases": []
+ }
+ ],
+ "DD_TRACE_STATS_ADDITIONAL_TAGS_CARDINALITY_LIMIT": [
+ {
+ "version": "B",
+ "type": "number",
+ "default": "100",
+ "aliases": []
+ }
+ ],
"DD_TRACE_STATS_COMPUTATION_ENABLED": [
{
"version": "B",