Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions manifests/cpp.yml
Original file line number Diff line number Diff line change
Expand Up @@ -209,6 +209,8 @@ manifest:
tests/parametric/test_span_sampling.py::Test_Span_Sampling::test_single_rule_with_head_and_rule_trace_sampling_keep_019:
- declaration: bug (APMAPI-1545)
component_version: <=1.0.0
tests/parametric/test_startup_logs.py::Test_Startup_Logs::test_startup_logs_default: incomplete_test_app (Not currently testable)
tests/parametric/test_startup_logs.py::Test_Startup_Logs::test_startup_logs_enabled: incomplete_test_app (Not currently testable)
tests/parametric/test_telemetry.py::Test_Consistent_Configs: missing_feature
tests/parametric/test_telemetry.py::Test_Environment::test_telemetry_otel_env_hiding: missing_feature (Not implemented)
tests/parametric/test_telemetry.py::Test_Environment::test_telemetry_otel_env_invalid: missing_feature (Not implemented)
Expand Down
2 changes: 2 additions & 0 deletions manifests/nodejs.yml
Original file line number Diff line number Diff line change
Expand Up @@ -1911,6 +1911,8 @@ manifest:
tests/parametric/test_span_sampling.py::Test_Span_Sampling::test_keep_span_with_stats_computation_sss010: missing_feature (this has to be implemented by a lot of the tracers and we need to do a bit of work on the assert)
? tests/parametric/test_span_sampling.py::Test_Span_Sampling::test_root_span_selected_and_child_dropped_by_sss_when_dropping_policy_is_active016
: missing_feature (Not implemented)
tests/parametric/test_startup_logs.py::Test_Startup_Logs::test_startup_logs_default: missing_feature (Startup configuration log not emitted by default when DD_TRACE_STARTUP_LOGS is unset)
tests/parametric/test_startup_logs.py::Test_Startup_Logs::test_startup_logs_diagnostic_agent_unreachable: missing_feature (Diagnostic messages not emitted when agent is unreachable)
tests/parametric/test_telemetry.py::Test_Consistent_Configs: *ref_5_25_0
tests/parametric/test_telemetry.py::Test_Consistent_Configs::test_library_settings_2: missing_feature (Not implemented)
tests/parametric/test_telemetry.py::Test_Defaults: *ref_5_6_0
Expand Down
3 changes: 3 additions & 0 deletions manifests/php.yml
Original file line number Diff line number Diff line change
Expand Up @@ -680,6 +680,9 @@ manifest:
tests/parametric/test_span_sampling.py::Test_Span_Sampling::test_single_rule_rate_limiter_span_sampling_sss008: missing_feature (PHP uses a float to represent the allowance in tokens and thus accepts one more request (given the time elapsed between individual requests))
tests/parametric/test_span_sampling.py::Test_Span_Sampling::test_single_rule_with_head_and_rule_trace_sampling_drop_020: bug (APMAPI-1545)
tests/parametric/test_span_sampling.py::Test_Span_Sampling::test_single_rule_with_head_and_rule_trace_sampling_keep_019: bug (APMAPI-1545)
tests/parametric/test_startup_logs.py::Test_Startup_Logs::test_startup_logs_default: incomplete_test_app (Not currently testable)
tests/parametric/test_startup_logs.py::Test_Startup_Logs::test_startup_logs_diagnostic_agent_unreachable: missing_feature (Diagnostic messages not emitted when agent is unreachable)
tests/parametric/test_startup_logs.py::Test_Startup_Logs::test_startup_logs_enabled: incomplete_test_app (Not currently testable)
tests/parametric/test_telemetry.py::Test_Consistent_Configs: missing_feature
tests/parametric/test_telemetry.py::Test_Defaults: missing_feature
tests/parametric/test_telemetry.py::Test_Environment: missing_feature
Expand Down
3 changes: 3 additions & 0 deletions manifests/rust.yml
Original file line number Diff line number Diff line change
Expand Up @@ -217,6 +217,9 @@ manifest:
tests/parametric/test_span_sampling.py::Test_Span_Sampling::test_single_rule_with_head_and_rule_trace_sampling_drop_020: missing_feature # Created by easy win activation script
tests/parametric/test_span_sampling.py::Test_Span_Sampling::test_single_rule_with_head_and_rule_trace_sampling_keep_019: missing_feature # Created by easy win activation script
tests/parametric/test_span_sampling.py::Test_Span_Sampling::test_special_glob_characters_span_sampling_sss002: missing_feature # Created by easy win activation script
tests/parametric/test_startup_logs.py::Test_Startup_Logs::test_startup_logs_default: missing_feature (Startup logs not implemented)
tests/parametric/test_startup_logs.py::Test_Startup_Logs::test_startup_logs_diagnostic_agent_unreachable: missing_feature (Diagnostic messages not implemented)
tests/parametric/test_startup_logs.py::Test_Startup_Logs::test_startup_logs_enabled: missing_feature (Startup logs not implemented)
tests/parametric/test_telemetry.py::Test_Consistent_Configs: missing_feature
tests/parametric/test_telemetry.py::Test_Defaults: missing_feature
tests/parametric/test_telemetry.py::Test_Environment: missing_feature
Expand Down
198 changes: 198 additions & 0 deletions tests/parametric/test_startup_logs.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,198 @@
"""Test startup log behavior for tracer libraries across supported languages."""

import re

import pytest

from utils import scenarios, features, context, logger
from .conftest import APMLibrary

# Short alias so test methods can use @parametrize(...) directly.
parametrize = pytest.mark.parametrize

# Regex pattern for matching startup log entries across all tracer libraries.
# NOTE(review): the trailing `.*` alternative makes CORE|TRACING|PROFILING
# redundant — any suffix after " - " is accepted. Confirm this is intended.
STARTUP_LOG_PATTERN = r"DATADOG (TRACER )?CONFIGURATION( - (CORE|TRACING|PROFILING|.*))?"


def _get_dotnet_startup_logs(test_library: APMLibrary, *, required: bool = True) -> str | None:
"""Get .NET tracer startup logs from the container (dotnet-tracer-managed* file).

If required is True, fails the test when the file is not found or empty.
If required is False, returns None when the file is not found or empty.
"""
success, log_files = test_library.container_exec_run(
"sh -c 'find / -name \"dotnet-tracer-managed*\" -type f 2>/dev/null | head -1'"
)
if not success or not log_files or not log_files.strip():
if required:
pytest.fail("Failed to find .NET startup log file: no file matching 'dotnet-tracer-managed*' found")
return None
log_file = log_files.strip()
success, logs = test_library.container_exec_run(f"sh -c 'cat {log_file} 2>/dev/null || true'")
if not success or not logs:
if required:
pytest.fail(f"Failed to read .NET startup log file: {log_file}")
return None
return logs


def _get_startup_logs(test_library: "APMLibrary", *, required: bool = True) -> str | None:
    """Get startup logs from container, handling language-specific differences.

    - .NET: Reads from the dotnet-tracer-managed* log file inside the container
    - Node.js/Ruby: Reads from both stdout and stderr (these libraries still
      log startup output to stdout; they are expected to move to stderr soon,
      at which point the special case can be removed)
    - Other libraries: Reads from stderr only

    Args:
        test_library: The APMLibrary test client
        required: If True, fails test when logs not found. If False, returns None.

    Returns:
        Log content as string, or None if not found and not required.

    """
    if context.library == "dotnet":
        return _get_dotnet_startup_logs(test_library, required=required)

    # Node.js and Ruby currently emit startup logs on stdout, so include it;
    # every other library logs to stderr only.
    include_stdout = context.library in ("nodejs", "ruby")
    try:
        logs = test_library.container.logs(stderr=True, stdout=include_stdout).decode("utf-8")
    except Exception as e:
        if required:
            pytest.fail(f"Failed to retrieve container logs: {e}")
        return None

    return logs


@scenarios.parametric
@features.log_tracer_status_at_startup
class Test_Startup_Logs:
    """Exercise tracer startup-log emission across all supported languages."""

    @staticmethod
    def _emit_test_span(test_library: APMLibrary) -> None:
        """Start and flush one span so tracers that log lazily (e.g. Node.js,
        which emits startup logs when sending its first trace) produce output."""
        with test_library.dd_start_span("test_operation", service="test_service"):
            pass
        test_library.dd_flush()

    def test_startup_logs_default(self, test_library: APMLibrary):
        """Verify default startup log behavior when DD_TRACE_STARTUP_LOGS is not set."""
        with test_library:
            # Node.js only logs its startup configuration on the first trace send.
            if context.library == "nodejs":
                self._emit_test_span(test_library)

            logs = _get_startup_logs(test_library, required=True)

            assert logs is not None
            startup_entry = re.search(STARTUP_LOG_PATTERN, logs, re.IGNORECASE)
            assert startup_entry, (
                f"Startup log not found (default behavior). Searched for pattern: '{STARTUP_LOG_PATTERN}'. "
                f"Content (first 2000 chars): {logs[:2000]}"
            )

    @parametrize(
        "library_env",
        [{"DD_TRACE_STARTUP_LOGS": "true"}],
        # DD_TRACE_DEBUG and DD_TRACE_LOG_LEVEL defaults are okay here
    )
    def test_startup_logs_enabled(self, test_library: APMLibrary):
        """Verify startup logs are emitted when DD_TRACE_STARTUP_LOGS=true."""
        with test_library:
            # Node.js only logs its startup configuration on the first trace send.
            if context.library == "nodejs":
                self._emit_test_span(test_library)

            logs = _get_startup_logs(test_library, required=True)

            assert logs is not None
            startup_entry = re.search(STARTUP_LOG_PATTERN, logs, re.IGNORECASE)
            assert startup_entry, (
                f"Startup log not found. Searched for pattern: '{STARTUP_LOG_PATTERN}'. "
                f"Content (first 2000 chars): {logs[:2000]}"
            )

    @parametrize(
        "library_env",
        [
            {
                "DD_TRACE_STARTUP_LOGS": "false",
                "DD_TRACE_DEBUG": "false",  # python requires DD_TRACE_DEBUG=false to suppress startup logs
                "DD_TRACE_LOG_LEVEL": "warn",  # java requires DD_TRACE_LOG_LEVEL=warn to suppress startup logs
            }
        ],
    )
    def test_startup_logs_disabled(self, test_library: APMLibrary):
        """Verify startup logs are suppressed when DD_TRACE_STARTUP_LOGS=false."""
        with test_library:
            # Trigger a trace so startup logs would appear if they were (wrongly) enabled.
            if context.library == "nodejs":
                self._emit_test_span(test_library)

            logs = _get_startup_logs(test_library, required=False)
            if logs is not None:
                unexpected = re.search(STARTUP_LOG_PATTERN, logs, re.IGNORECASE)
                if unexpected:
                    logger.error(logs)
                    pytest.fail(
                        f"Startup log found when DD_TRACE_STARTUP_LOGS=false. "
                        f"Found pattern: '{unexpected.group(0)}'. "
                        f"Logs (first 1000 chars): {logs[:1000]}"
                    )

    @parametrize(
        "library_env",
        [
            {
                "DD_TRACE_STARTUP_LOGS": "true",
                "DD_TRACE_AGENT_URL": "http://unreachable-host-that-does-not-exist:8126",
            }
        ],
    )
    def test_startup_logs_diagnostic_agent_unreachable(self, test_library: APMLibrary):
        """Verify diagnostic messages appear when agent is unreachable."""
        with test_library:
            # Force a connection attempt: some tracers only contact the agent
            # when they flush spans.
            self._emit_test_span(test_library)

            logs = _get_startup_logs(test_library, required=True)

            assert logs is not None
            diagnostic_patterns = [
                r"Agent not reachable",
                r"Connection refused",
                r"Agent Error",
                r"Agent.*unreachable",
                r"Failed to.*agent",
                r"Could not.*connect.*agent",
                r"Connection.*failed",
                r"ECONNREFUSED",
                r"Connection.*refused",
                r"ENOTFOUND",  # DNS resolution failure (Node.js)
                r"getaddrinfo.*ENOTFOUND",  # Node.js DNS error format
            ]

            # First pattern (in declaration order) that matches the logs, or None.
            matched_pattern = next(
                (pattern for pattern in diagnostic_patterns if re.search(pattern, logs, re.IGNORECASE)),
                None,
            )

            assert matched_pattern is not None, (
                f"No diagnostic message found when agent is unreachable. "
                f"Searched for patterns: {diagnostic_patterns}. "
                f"Logs (first 2000 chars): {logs[:2000]}"
            )

            logger.info(f"Found diagnostic message with pattern: {matched_pattern}")
Loading