diff --git a/config/_default/menus/main.en.yaml b/config/_default/menus/main.en.yaml index 677b5c17fb8..fc91d98f9b9 100644 --- a/config/_default/menus/main.en.yaml +++ b/config/_default/menus/main.en.yaml @@ -5868,16 +5868,21 @@ menu: parent: observability_pipelines_processors identifier: observability_pipelines_processors_split_array weight: 317 + - name: Tag Control + url: observability_pipelines/processors/tag_control + parent: observability_pipelines_processors + identifier: observability_pipelines_processors_tag_control + weight: 318 - name: Tags url: observability_pipelines/processors/tags parent: observability_pipelines_processors identifier: observability_pipelines_processors_tags - weight: 318 + weight: 319 - name: Throttle url: observability_pipelines/processors/throttle parent: observability_pipelines_processors identifier: observability_pipelines_processors_throttle - weight: 319 + weight: 320 - name: Destinations url: observability_pipelines/destinations/ parent: observability_pipelines @@ -5918,76 +5923,81 @@ menu: parent: observability_pipelines_destinations identifier: observability_pipelines_datadog_logs weight: 407 + - name: Datadog Metrics + url: observability_pipelines/destinations/datadog_metrics/ + parent: observability_pipelines_destinations + identifier: observability_pipelines_datadog_metrics + weight: 408 - name: Elasticsearch url: observability_pipelines/destinations/elasticsearch/ parent: observability_pipelines_destinations identifier: observability_pipelines_elasticsearch - weight: 408 + weight: 409 - name: Google Chronicle url: observability_pipelines/destinations/google_chronicle parent: observability_pipelines_destinations identifier: observability_pipelines_google_chronicle - weight: 409 + weight: 410 - name: Google Cloud Storage identifier: observability_pipelines_google_cloud_storage url: /observability_pipelines/destinations/google_cloud_storage/ parent: observability_pipelines_destinations - weight: 410 + weight: 411 - name: Google Pub/Sub identifier: observability_pipelines_google_pubsub url: /observability_pipelines/destinations/google_pubsub/ parent: observability_pipelines_destinations - weight: 411 + weight: 412 - name: HTTP Client url: observability_pipelines/destinations/http_client/ parent: observability_pipelines_destinations identifier: observability_pipelines_http_client - weight: 412 + weight: 413 - name: Kafka url: observability_pipelines/destinations/kafka/ parent: observability_pipelines_destinations identifier: observability_pipelines_kafka - weight: 413 + weight: 414 - name: Microsoft Sentinel identifier: observability_pipelines_microsoft_sentinel url: /observability_pipelines/destinations/microsoft_sentinel/ parent: observability_pipelines_destinations - weight: 414 + weight: 415 - name: New Relic identifier: observability_pipelines_new_relic url: /observability_pipelines/destinations/new_relic/ parent: observability_pipelines_destinations - weight: 415 + weight: 416 - name: OpenSearch url: observability_pipelines/destinations/opensearch parent: observability_pipelines_destinations identifier: observability_pipelines_opensearch - weight: 416 + weight: 417 - name: SentinelOne url: observability_pipelines/destinations/sentinelone parent: observability_pipelines_destinations identifier: observability_pipelines_sentinelone - weight: 417 + weight: 418 - name: Socket url: observability_pipelines/destinations/socket parent: observability_pipelines_destinations identifier: observability_pipelines_socket - weight: 418 + weight: 419 - name: 
Splunk HEC url: observability_pipelines/destinations/splunk_hec parent: observability_pipelines_destinations identifier: observability_pipelines_splunk_hec - weight: 419 + weight: 420 - name: Sumo Logic Hosted Collector url: observability_pipelines/destinations/sumo_logic_hosted_collector parent: observability_pipelines_destinations identifier: observability_pipelines_sumo_logic_hosted_collector - weight: 420 + weight: 421 - name: Syslog url: observability_pipelines/destinations/syslog parent: observability_pipelines_destinations identifier: observability_pipelines_syslog - weight: 421 + weight: 422 - name: Packs url: observability_pipelines/packs/ parent: observability_pipelines diff --git a/content/en/observability_pipelines/_index.md b/content/en/observability_pipelines/_index.md index 40450b257c0..9818d76c108 100644 --- a/content/en/observability_pipelines/_index.md +++ b/content/en/observability_pipelines/_index.md @@ -125,11 +125,11 @@ Observability Pipelines includes prebuilt templates for common data routing and {{% tab "Metrics" %}}
-Metrics Volume and Cardinality Control is in Preview. Fill out the form to request access.
+Metric Tag Governance is in Preview. Fill out the form to request access. | Template | Description | |----------|-------------| -| Metrics Volume and Cardinality Control | Manage the quality and volume of your metrics by keeping only the metrics you need, standardizing metrics tagging, and removing unwanted tags to prevent high cardinality. | +| Metric Tag Governance | Manage the quality and volume of your metrics by keeping only the metrics you need, standardizing metrics tagging, and removing unwanted tags to prevent high cardinality. | {{% /tab %}} {{< /tabs >}} diff --git a/content/en/observability_pipelines/configuration/_index.md b/content/en/observability_pipelines/configuration/_index.md index 5dede433ed0..cbb9f7915a5 100644 --- a/content/en/observability_pipelines/configuration/_index.md +++ b/content/en/observability_pipelines/configuration/_index.md @@ -28,19 +28,106 @@ Observability Pipelines lets you collect and process logs and metrics ({{< toolt Build and deploy pipelines to collect, transform, and route your data using one of these methods: - - Pipeline UI - - [API][4] - - [Terraform][5] + - [Pipeline UI][4] + - [API][5] + - [Terraform][6] - See [Set Up Pipelines][6] for source, processor, and destination configuration details. +## Pipeline types - ## Further reading +There are two types of pipelines: + +{{< tabs >}} +{{% tab "Logs" %}} + +Use one of the [logs templates][1] to create a log pipeline. + +- Archive Logs +- Dual Ship Logs +- Generate Log-based Metrics +- Log Enrichment +- Log Volume Control +- Sensitive Data Redaction +- Split Logs + +See [Set Up Pipelines][2] for more information on setting up a source, processors, and destinations. + +[1]: /observability_pipelines/configuration/explore_templates/?tab=logs#templates +[2]: /observability_pipelines/configuration/set_up_pipelines/ + +{{% /tab %}} + +{{% tab "Metrics" %}} + +
+Metric Tag Governance is in Preview. Fill out the form to request access.
+ +Use the [Metric Tag Governance][1] template to create a metrics pipeline. + +See [Set Up Pipelines][2] for more information on setting up a source, processors, and destination. + +### Metrics data + +Metrics sent to Observability Pipelines include the following: + +- `name`: The metric name. +- `kind`: There are two kinds of metrics: + - `absolute` metrics: Represent the current value of a measurement at the time it is reported. + - `incremental` metrics: Represent the change in a measurement since the last reported value, which the system aggregates over time. +- `value`: The [metric type](#metric-types): + - `counter` + - `gauge` + - `distribution` + - `histogram` +- `timestamp`: The date and time the metric is created. +- `tags`: Includes tags such as `host`. + +The `counter` metric type is the only `incremental` metric. `gauge`, `distribution`, and `histogram` metric types are `absolute` metrics. + +An example of a metric: + +``` +{ + "name":"datadog.agent.retry_queue_duration.bytes_per_sec", + "tags":{ + "agent":"core", + "domain":"https://7-72-3-app.agent.datadoghq.com", + "host":"COMP-YGVQDJG75L", + "source_type_name":"System", + "env":"prod" + }, + "timestamp":"2025-11-28T13:03:09Z", + "kind":"absolute", + "gauge":{"value":454.1372767857143} +} +``` + +### Metric types + +The available metric types: + +| Metric type | Description | Example | +| ----------- | ----------- | ------- | +| COUNTER | Represents the total number of event occurrences in one time interval. This value can be reset to zero, but cannot be decreased. | You want to count the number of logs with `status:error`. | +| GAUGE | Represents a snapshot of events in one time interval. | You want to measure the latest CPU utilization per host for all logs in the production environment. | +| DISTRIBUTION | Represents the global statistical distribution of a set of values calculated across your entire distributed infrastructure in one time interval. | You want to measure the average time it takes for an API call to be made. | +| HISTOGRAM | Represents the statistical distribution of a set of values calculated in one time interval. | You want to measure response time distributions for a service or endpoint. | + +See [Metric Types][3] for more information. 
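+ +For contrast with the `absolute` gauge example above, the following is a hypothetical `incremental` metric with made-up values. It assumes the `counter` payload mirrors the `gauge` shape shown above; this is an illustrative sketch, not a documented schema: + +``` +{ + "name":"app.requests.count", + "tags":{ + "host":"COMP-YGVQDJG75L", + "env":"prod" + }, + "timestamp":"2025-11-28T13:03:10Z", + "kind":"incremental", + "counter":{"value":42} +} +``` 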
+ +[1]: /observability_pipelines/configuration/explore_templates/?tab=metrics#metric-tag-governance +[2]: /observability_pipelines/configuration/set_up_pipelines/ +[3]: /metrics/types/?tab=gauge#metric-types + +{{% /tab %}} +{{< /tabs >}} + +## Further reading {{< partial name="whats-next/whats-next.html" >}} [1]: /observability_pipelines/sources/ [2]: /observability_pipelines/processors/ [3]: /observability_pipelines/destinations/ -[4]: /api/latest/observability-pipelines/#create-a-new-pipeline -[5]: https://registry.terraform.io/providers/DataDog/datadog/latest/docs -[6]: /observability_pipelines/configuration/set_up_pipelines/ \ No newline at end of file +[4]: https://app.datadoghq.com/observability-pipelines +[5]: /api/latest/observability-pipelines/#create-a-new-pipeline +[6]: https://registry.terraform.io/providers/DataDog/datadog/latest/docs diff --git a/content/en/observability_pipelines/configuration/explore_templates.md b/content/en/observability_pipelines/configuration/explore_templates.md index c60550ec090..a639a4b51f5 100644 --- a/content/en/observability_pipelines/configuration/explore_templates.md +++ b/content/en/observability_pipelines/configuration/explore_templates.md @@ -65,14 +65,14 @@ When you have logs from different services and applications, you might need to s {{% /tab %}} {{% tab "Metrics" %}} -### Metrics Volume and Cardinality Control +### Metric Tag Governance
-Metrics Volume and Cardinality Control is in Preview. Fill out the form to request access.
+Metric Tag Governance is in Preview. Fill out the form to request access. Metrics capture signals about your environment and offer insight into your system health, business workflows, and security activities. These metrics are sent from your various applications, network devices, and nodes, but the value of individual metrics can vary significantly. -To help you manage the quality and volume of your metrics, use the Metrics Volume and Cardinality Control template to process them in Observability Pipelines before sending them to your destinations. You can use processors to keep only the metrics you need, standardize metrics tagging, and remove unwanted tags to prevent high cardinality. +To help you manage the quality and volume of your metrics, use the Metric Tag Governance template to process them in Observability Pipelines before sending them to your destinations. You can use processors to keep only the metrics you need, standardize metrics tagging, and remove unwanted tags to prevent high cardinality. {{% /tab %}} {{< /tabs >}} diff --git a/content/en/observability_pipelines/configuration/set_up_pipelines.md b/content/en/observability_pipelines/configuration/set_up_pipelines.md index 3da0e9d8b87..56e64579157 100644 --- a/content/en/observability_pipelines/configuration/set_up_pipelines.md +++ b/content/en/observability_pipelines/configuration/set_up_pipelines.md @@ -29,24 +29,21 @@ In Observability Pipelines, a pipeline is a sequential path with three types of {{< img src="observability_pipelines/archive_log_pipeline.png" alt="Pipeline with one source connected to two processor groups and two destinations" style="width:100%;" >}} -## Set up a pipeline +You can create a pipeline with one of the following methods: + +- [Pipeline UI](#set-up-a-pipeline-in-the-ui) +- [API](#set-up-a-pipeline-with-the-api) +- [Terraform](#set-up-a-pipeline-with-terraform) + +## Set up a pipeline in the UI {{< tabs >}} -{{% tab "Pipeline UI" %}} - -Set up your pipelines and its sources, processors, and destinations in the Observability Pipelines UI. - -1. Navigate to [Observability Pipelines][13]. -1. Select a template based on your use case. - - [Archive Logs][4] - - [Dual Ship Logs][5] - - [Generate Metrics][6] - - [Log Enrichment][7] - - [Log Volume Control][8] - - [Sensitive Data Redaction][9] - - [Split Logs][10] +{{% tab "Logs" %}} + +1. Navigate to [Observability Pipelines][7]. +1. Select a [template][4] based on your use case. 1. Select and set up your [source][1]. -1. Add [processors][2] to transform, redact, and enrich log data. +1. Add [processors][2] to transform, redact, and enrich your log data. - If you want to copy a processor, click the copy icon for that processor and then use `command-v` to paste it. 1. Select and set up [destinations][3] for your processed logs. @@ -59,7 +56,7 @@ Set up your pipelines and its sources, processors, and destinations in the Obser If you want to add another group of processors for a destination: 1. Click the plus sign (**+**) at the bottom of the existing processor group. 1. Click the name of the processor group to update it. -1. Optionally, enter a group filter. See [Filter Syntax][17] for more information. +1. Optionally, enter a group filter. See [Search Syntax][11] for more information. 1. Click **Add** to add processors to the group. 1. If you want to copy all processors in a group and paste them into the same processor group or a different group: 1. Click the three dots on the processor group. 
@@ -79,7 +76,7 @@ If you want to add another set of processors and destinations, click the plus si To delete a processor group, you need to delete all destinations linked to that processor group. When the last destination is deleted, the processor group is removed with it. -#### Add another destination to a processor group +### Add another destination to a processor group {{< img src="observability_pipelines/setup/another_destination.png" alt="The Pipelines page showing one processor group sending logs to two different destinations" style="width:100%;" >}} @@ -91,77 +88,98 @@ To delete a destination, click on the pencil icon to the top right of the destin - If you delete a destination from a processor group that has multiple destinations, only the deleted destination is removed. - If you delete a destination from a processor group that only has one destination, both the destination and the processor group are removed. +[1]: /observability_pipelines/sources/ +[2]: /observability_pipelines/processors/ +[3]: /observability_pipelines/destinations/ +[4]: /observability_pipelines/configuration/explore_templates/ +[5]: /observability_pipelines/configuration/update_existing_pipelines/ +[6]: /observability_pipelines/configuration/install_the_worker/ +[7]: https://app.datadoghq.com/observability-pipelines +[8]: /monitors/types/metric/ +[9]: /observability_pipelines/guide/environment_variables/ +[10]: /observability_pipelines/configuration/install_the_worker/advanced_worker_configurations/#bootstrap-options +[11]: /observability_pipelines/search_syntax/ + +{{% /tab %}} +{{% tab "Metrics" %}} + +1. Navigate to [Observability Pipelines][1]. +1. Select the [Metric Tag Governance][2] template. +1. Set up the [Datadog Agent][3] source. +1. Add [processors][4] to filter and transform your metrics. + - If you want to copy a processor, click the copy icon for that processor and then paste it (`Cmd+V` on Mac, `Ctrl+V` on Windows/Linux). +1. Set up the [Datadog Metrics][5] destination. + +### Add another processor group + +{{< img src="observability_pipelines/setup/another_processor_group_metrics.png" alt="The Pipelines page showing two processor groups sending metrics to the same destination" style="width:100%;" >}} + +If you want to add another group of processors for a destination: +1. Click the plus sign (**+**) at the bottom of the existing processor group. +1. Click the name of the processor group to update it. +1. Optionally, enter a group filter. See [Search Syntax][6] for more information. +1. Click **Add** to add processors to the group. +1. If you want to copy all processors in a group and paste them into the same processor group or a different group: + 1. Click the three dots on the processor group. + 1. Select **Copy all processors**. + 1. Select the desired processor group, and then paste the processors into it. +1. You can toggle the switch to enable or disable the processor group, as well as each individual processor. + +**Note**: There is a limit of 10 processor groups for a pipeline canvas. 
+ +[1]: https://app.datadoghq.com/observability-pipelines +[2]: /observability_pipelines/configuration/explore_templates/?tab=metrics#metric-tag-governance +[3]: /observability_pipelines/sources/datadog_agent/?tab=metrics +[4]: /observability_pipelines/processors/ +[5]: /observability_pipelines/destinations/datadog_metrics/ +[6]: /observability_pipelines/search_syntax/metrics/ + +{{% /tab %}} +{{< /tabs >}} + ### Install the Worker and deploy the pipeline After you have set up your source, processors, and destinations: 1. Click **Next: Install**. 1. Select the platform on which you want to install the Worker. -1. Enter the [environment variables][15] for your sources and destinations, if applicable. +1. Enter the [environment variables][9] for your sources and destinations, if applicable. 1. Follow the instructions on installing the Worker for your platform. The command provided in the UI to install the Worker has the relevant environment variables populated. - - See [Install the Worker][12] for more information. - - **Note**: If you are using a proxy, see the `proxy` option in [Bootstrap options][16]. + - See [Install the Worker][6] for more information. + - **Note**: If you are using a proxy, see the `proxy` option in [Bootstrap options][10]. 1. Enable out-of-the-box monitors for your pipeline. - 1. Navigate to the [Pipelines][13] page and find your pipeline. + 1. Navigate to the [Pipelines][7] page and find your pipeline. 1. Click **Enable monitors** in the **Monitors** column for your pipeline. 1. Click **Start** to set up a monitor for one of the suggested use cases.
- - The metric monitor is configured based on the selected use case. You can update the configuration to further customize it. See the [Metric monitor documentation][14] for more information. + - The metric monitor is configured based on the selected use case. You can update the configuration to further customize it. See the [Metric monitor documentation][8] for more information. After you have set up your pipeline, see [Update Existing Pipelines][11] if you want to make any changes to it. -[1]: /observability_pipelines/sources/ -[2]: /observability_pipelines/processors/ -[3]: /observability_pipelines/destinations/ -[4]: /observability_pipelines/#archive-logs -[5]: /observability_pipelines/#dual-ship-logs -[6]: /observability_pipelines/#generate-metrics -[7]: /observability_pipelines/#log-enrichment -[8]: /observability_pipelines/#log-volume-control -[9]: /observability_pipelines/#sensitive-data-redaction -[10]: /observability_pipelines/#split-logs -[11]: /observability_pipelines/configuration/update_existing_pipelines/ -[12]: /observability_pipelines/configuration/install_the_worker/ -[13]: https://app.datadoghq.com/observability-pipelines -[14]: /monitors/types/metric/ -[15]: /observability_pipelines/guide/environment_variables/ -[16]: /observability_pipelines/configuration/install_the_worker/advanced_worker_configurations/#bootstrap-options -[17]: /observability_pipelines/processors/#filter-query-syntax +See [Advanced Worker Configurations][5] for bootstrapping options. -{{% /tab %}} -{{% tab "API" %}} +## Set up a pipeline with the API
Creating pipelines using the Observability Pipelines API is in Preview. Fill out the form to request access.
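+ +For orientation, a create-pipeline request body takes roughly the following shape. This is a hypothetical sketch with illustrative component IDs and a minimal logs configuration; the [create a pipeline][6] API reference has the authoritative schema: + +``` +{ + "data": { + "type": "pipelines", + "attributes": { + "name": "my-pipeline", + "config": { + "sources": [ { "id": "datadog-agent-source", "type": "datadog_agent" } ], + "processors": [ { "id": "filter-processor", "type": "filter", "include": "service:my-service", "inputs": ["datadog-agent-source"] } ], + "destinations": [ { "id": "datadog-logs-destination", "type": "datadog_logs", "inputs": ["filter-processor"] } ] + } + } + } +} +``` 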
-1. You can use Observability Pipelines API to [create a pipeline][1]. +1. Use the Observability Pipelines API to [create a pipeline][6]. See the API reference for example request payloads. -1. After creating the pipeline, [install the Worker][2] to send data through the pipeline. - - See [Environment Variables][4] for the list of environment variables you need for the different sources, processor, and destinations when you install the Worker. +1. After creating the pipeline, [install the Worker][7] to send data through the pipeline. + - See [Environment Variables][9] for the list of environment variables you need for the different sources, processors, and destinations when you install the Worker. -**Note**: Pipelines created using the API are read-only in the UI. Use the [update a pipeline][3] endpoint to make any changes to an existing pipeline. +**Note**: Pipelines created using the API are read-only in the UI. Use the [update a pipeline][8] endpoint to make any changes to an existing pipeline. -[1]: /api/latest/observability-pipelines/#create-a-new-pipeline -[2]: /observability_pipelines/configuration/install_the_worker/?tab=docker#api-or-terraform-pipeline-setup -[3]: /api/latest/observability-pipelines/#update-a-pipeline -[4]: /observability_pipelines/guide/environment_variables/ +See [Advanced Worker Configurations][5] for bootstrapping options. -{{% /tab %}} -{{% tab "Terraform" %}} +## Set up a pipeline with Terraform
Creating pipelines using Terraform is in Preview. Fill out the form to request access.
-1. You can use the [datadog_observability_pipeline][1] module to create a pipeline using Terraform. +1. Use the [datadog_observability_pipeline][10] module to create a pipeline using Terraform. -1. After creating the pipeline, [install the Worker][2] to send data through the pipeline. - - See [Environment Variables][3] for the list of environment variables you need for the different sources, processor, and destinations when you install the Worker. +1. After creating the pipeline, [install the Worker][7] to send data through the pipeline. + - See [Environment Variables][9] for the list of environment variables you need for the different sources, processors, and destinations when you install the Worker. -Pipelines created using Terraform are read-only in the UI. Use the [datadog_observability_pipeline][1] module to make any changes to an existing pipeline. - -[1]: https://registry.terraform.io/providers/DataDog/datadog/latest/docs -[2]: /observability_pipelines/configuration/install_the_worker/?tab=docker#api-or-terraform-pipeline-setup -[3]: /observability_pipelines/guide/environment_variables/ - -{{% /tab %}} -{{< /tabs >}} +Pipelines created using Terraform are read-only in the UI. Use the [datadog_observability_pipeline][10] module to make any changes to an existing pipeline. See [Advanced Worker Configurations][5] for bootstrapping options. @@ -186,8 +204,9 @@ To delete a pipeline in the UI: ## Pipeline requirements and limits - A pipeline must have at least one destination. If a processor group only has one destination, that destination cannot be deleted. -- You can add a total of three destinations for a pipeline. -- A specific destination can only be added once. For example, you cannot add multiple Splunk HEC destinations. +- For log pipelines: + - You can add a total of three destinations for a log pipeline. + - A specific destination can only be added once. For example, you cannot add multiple Splunk HEC destinations. ## Further Reading @@ -197,4 +216,10 @@ To delete a pipeline in the UI: [2]: /observability_pipelines/processors/ [3]: /observability_pipelines/destinations/ [4]: https://app.datadoghq.com/observability-pipelines -[5]: /observability_pipelines/configuration/install_the_worker/advanced_worker_configurations/ \ No newline at end of file +[5]: /observability_pipelines/configuration/install_the_worker/advanced_worker_configurations/ +[6]: /api/latest/observability-pipelines/#create-a-new-pipeline +[7]: /observability_pipelines/configuration/install_the_worker/?tab=docker#api-or-terraform-pipeline-setup +[8]: /api/latest/observability-pipelines/#update-a-pipeline +[9]: /observability_pipelines/guide/environment_variables/ +[10]: https://registry.terraform.io/providers/DataDog/datadog/latest/docs +[11]: /observability_pipelines/configuration/update_existing_pipelines/ \ No newline at end of file diff --git a/content/en/observability_pipelines/destinations/_index.md b/content/en/observability_pipelines/destinations/_index.md index fb5d47d678e..29fa5d52540 100644 --- a/content/en/observability_pipelines/destinations/_index.md +++ b/content/en/observability_pipelines/destinations/_index.md @@ -13,6 +13,68 @@ Use the Observability Pipelines Worker to send your processed logs and metrics ( Select a destination in the left navigation menu to see more information about it. 
+## Destinations + +These are the available destinations: + +{{< tabs >}} +{{% tab "Logs" %}} + +- [Amazon OpenSearch][1] +- [Amazon S3][2] +- [Amazon Security Lake][3] +- [Azure Storage][4] +- [Datadog CloudPrem][5] +- [CrowdStrike Next-Gen SIEM][6] +- [Datadog Logs][7] +- [Elasticsearch][8] +- [Google Chronicle][9] +- [Google Cloud Storage][10] +- [Google Pub/Sub][11] +- [HTTP Client][12] +- [Kafka][13] +- [Microsoft Sentinel][14] +- [New Relic][15] +- [OpenSearch][16] +- [SentinelOne][17] +- [Socket][18] +- [Splunk HTTP Event Collector (HEC)][19] +- [Sumo Logic Hosted Collector][20] +- [Syslog][21] + +[1]: /observability_pipelines/destinations/amazon_opensearch/ +[2]: /observability_pipelines/destinations/amazon_s3/ +[3]: /observability_pipelines/destinations/amazon_security_lake/ +[4]: /observability_pipelines/destinations/azure_storage/ +[5]: /observability_pipelines/destinations/cloudprem/ +[6]: /observability_pipelines/destinations/crowdstrike_ng_siem/ +[7]: /observability_pipelines/destinations/datadog_logs/ +[8]: /observability_pipelines/destinations/elasticsearch/ +[9]: /observability_pipelines/destinations/google_chronicle/ +[10]: /observability_pipelines/destinations/google_cloud_storage/ +[11]: /observability_pipelines/destinations/google_pubsub/ +[12]: /observability_pipelines/destinations/http_client/ +[13]: /observability_pipelines/destinations/kafka/ +[14]: /observability_pipelines/destinations/microsoft_sentinel/ +[15]: /observability_pipelines/destinations/new_relic/ +[16]: /observability_pipelines/destinations/opensearch/ +[17]: /observability_pipelines/destinations/sentinelone/ +[18]: /observability_pipelines/destinations/socket/ +[19]: /observability_pipelines/destinations/splunk_hec/ +[20]: /observability_pipelines/destinations/sumo_logic_hosted_collector/ +[21]: /observability_pipelines/destinations/syslog/ + +{{% /tab %}} + +{{% tab "Metrics" %}} + +- [Datadog Metrics][1] + +[1]: /observability_pipelines/destinations/datadog_metrics/ + +{{% /tab %}} +{{< /tabs >}} + ## Template syntax Logs are often stored in separate indexes based on log data, such as the service or environment the logs are coming from or another log attribute. In Observability Pipelines, you can use template syntax to route your logs to different indexes based on specific log fields. diff --git a/content/en/observability_pipelines/destinations/amazon_opensearch.md b/content/en/observability_pipelines/destinations/amazon_opensearch.md index d0675f52b9e..c11d6682028 100644 --- a/content/en/observability_pipelines/destinations/amazon_opensearch.md +++ b/content/en/observability_pipelines/destinations/amazon_opensearch.md @@ -1,8 +1,13 @@ --- title: Amazon OpenSearch Destination disable_toc: false +products: +- name: Logs + icon: logs --- +{{< product-availability >}} + Use Observability Pipelines' Amazon OpenSearch destination to send logs to Amazon OpenSearch. ## Setup diff --git a/content/en/observability_pipelines/destinations/amazon_s3.md b/content/en/observability_pipelines/destinations/amazon_s3.md index 0ab4f2ca284..4b8347be9a9 100644 --- a/content/en/observability_pipelines/destinations/amazon_s3.md +++ b/content/en/observability_pipelines/destinations/amazon_s3.md @@ -1,8 +1,13 @@ --- title: Amazon S3 Destination disable_toc: false +products: +- name: Logs + icon: logs --- +{{< product-availability >}} + Use the Amazon S3 destination to send logs to Amazon S3. 
If you want to send logs to Amazon S3 for [archiving][1] and [rehydration][2], you must [configure Log Archives](#configure-log-archives). If you don't want to rehydrate your logs in Datadog, skip to [Set up the destination for your pipeline](#set-up-the-destination-for-your-pipeline). You can also [route logs to Snowflake using the Amazon S3 destination](#route-logs-to-snowflake-using-the-amazon-s3-destination). diff --git a/content/en/observability_pipelines/destinations/amazon_security_lake.md b/content/en/observability_pipelines/destinations/amazon_security_lake.md index bca83837d95..e54b2857f39 100644 --- a/content/en/observability_pipelines/destinations/amazon_security_lake.md +++ b/content/en/observability_pipelines/destinations/amazon_security_lake.md @@ -1,8 +1,13 @@ --- title: Amazon Security Lake Destination disable_toc: false +products: +- name: Logs + icon: logs --- +{{< product-availability >}} + Use Observability Pipelines' Amazon Security Lake destination to send logs to Amazon Security Lake. ## Prerequisites diff --git a/content/en/observability_pipelines/destinations/azure_storage.md b/content/en/observability_pipelines/destinations/azure_storage.md index 8a7108dd0bd..892d17fa4e7 100644 --- a/content/en/observability_pipelines/destinations/azure_storage.md +++ b/content/en/observability_pipelines/destinations/azure_storage.md @@ -1,8 +1,13 @@ --- title: Azure Storage Destination disable_toc: false +products: +- name: Logs + icon: logs --- +{{< product-availability >}} + Use the Azure Storage destination to send logs to an Azure Storage bucket. If you want to send logs to Azure Storage for [archiving][1] and [rehydration][2], you must [configure Log Archives](#configure-log-archives). If you don't want to rehydrate logs in Datadog, skip to [Set up the destination for your pipeline](#set-up-the-destination-for-your-pipeline). ## Configure Log Archives diff --git a/content/en/observability_pipelines/destinations/cloudprem.md b/content/en/observability_pipelines/destinations/cloudprem.md index 7b3c9994234..ccc48e7c992 100644 --- a/content/en/observability_pipelines/destinations/cloudprem.md +++ b/content/en/observability_pipelines/destinations/cloudprem.md @@ -1,8 +1,13 @@ --- title: Datadog CloudPrem Destination disable_toc: false +products: +- name: Logs + icon: logs --- +{{< product-availability >}} + Use Observability Pipelines' CloudPrem destination to send logs to Datadog CloudPrem. diff --git a/content/en/observability_pipelines/destinations/crowdstrike_ng_siem.md b/content/en/observability_pipelines/destinations/crowdstrike_ng_siem.md index 65d0ccc427a..dbcc427dbcf 100644 --- a/content/en/observability_pipelines/destinations/crowdstrike_ng_siem.md +++ b/content/en/observability_pipelines/destinations/crowdstrike_ng_siem.md @@ -1,8 +1,13 @@ --- title: CrowdStrike Next-Gen SIEM Destination disable_toc: false +products: +- name: Logs + icon: logs --- +{{< product-availability >}} + Use Observability Pipelines' CrowdStrike Next-Gen SIEM destination to send logs to CrowdStrike Next-Gen SIEM. 
## Setup diff --git a/content/en/observability_pipelines/destinations/datadog_logs.md b/content/en/observability_pipelines/destinations/datadog_logs.md index 6aad72cd2e6..ed454e87775 100644 --- a/content/en/observability_pipelines/destinations/datadog_logs.md +++ b/content/en/observability_pipelines/destinations/datadog_logs.md @@ -1,8 +1,13 @@ --- title: Datadog Logs Destination disable_toc: false +products: +- name: Logs + icon: logs --- +{{< product-availability >}} + Use Observability Pipelines' Datadog Logs destination to send logs to Datadog Log Management. You can also use [AWS PrivateLink](#aws-privatelink) to send logs from Observability Pipelines to Datadog. ## Setup diff --git a/content/en/observability_pipelines/destinations/datadog_metrics.md b/content/en/observability_pipelines/destinations/datadog_metrics.md new file mode 100644 index 00000000000..c40b0d21b81 --- /dev/null +++ b/content/en/observability_pipelines/destinations/datadog_metrics.md @@ -0,0 +1,54 @@ +--- +title: Datadog Metrics +description: Learn how to set up the Datadog Metrics destination. +disable_toc: false +products: +- name: Metrics + icon: metrics +--- + +{{< product-availability >}} + +Use Observability Pipelines' Datadog Metrics destination to send metrics to Datadog. You can also use [AWS PrivateLink](#aws-privatelink) to send metrics from Observability Pipelines to Datadog. + +## Setup + +Set up the Datadog Metrics destination and its environment variables when you [set up a pipeline][1]. The information below is configured in the pipelines UI. + +{{< img src="observability_pipelines/destinations/datadog_metrics_settings.png" alt="The Datadog Metrics destination settings" style="width:40%;" >}} + +### Set up the destination + +Optionally, toggle **Buffering Options** to configure how events are buffered before being sent. +**Note**: Buffering options is in {{< tooltip glossary="preview" case="title" >}}. Contact your account manager to request access. + +- If disabled, the buffer holds a maximum of 500 events. Events beyond this limit are dropped. +- If enabled: + - Select the buffer type you want to set (Memory or Disk). + - Enter the buffer size and select the unit. + +### Set the environment variables + +{{% observability_pipelines/configure_existing_pipelines/destination_env_vars/datadog %}} + +## How the destination works + +A batch of events is flushed when one of these parameters is met. See [event batching][2] for more information. + +| Max Events | Max Bytes | Timeout (seconds) | +|----------------|-----------------|---------------------| +| 100,000 | None | 2 | + +## AWS PrivateLink + +To send metrics from Observability Pipelines to Datadog using AWS PrivateLink, see [Connect to Datadog over AWS PrivateLink][3] for setup instructions. The two endpoints you need to set up are: + +- Metrics: {{< region-param key=metrics_endpoint_private_link code="true" >}} +- Remote Configuration: {{< region-param key=remote_config_endpoint_private_link code="true" >}} + +**Note**: The `obpipeline-intake.datadoghq.com` endpoint is used for Live Capture and is not available as a PrivateLink endpoint. 
+ +[1]: https://app.datadoghq.com/observability-pipelines +[2]: https://docs.datadoghq.com/observability_pipelines/destinations/#event-batching +[3]: https://docs.datadoghq.com/agent/guide/private-link/?tab=crossregionprivatelinkendpoints +[4]: http://config.datadoghq.com diff --git a/content/en/observability_pipelines/destinations/elasticsearch.md b/content/en/observability_pipelines/destinations/elasticsearch.md index 66fb4bc81b9..40b07159833 100644 --- a/content/en/observability_pipelines/destinations/elasticsearch.md +++ b/content/en/observability_pipelines/destinations/elasticsearch.md @@ -1,8 +1,13 @@ --- title: Elasticsearch Destination disable_toc: false +products: +- name: Logs + icon: logs --- +{{< product-availability >}} + Use Observability Pipelines' Elasticsearch destination to send logs to Elasticsearch. ## Setup diff --git a/content/en/observability_pipelines/destinations/google_chronicle.md b/content/en/observability_pipelines/destinations/google_chronicle.md index 82beb1b5b65..38477a28b68 100644 --- a/content/en/observability_pipelines/destinations/google_chronicle.md +++ b/content/en/observability_pipelines/destinations/google_chronicle.md @@ -1,7 +1,13 @@ --- title: Google Chronicle Destination disable_toc: false +products: +- name: Logs + icon: logs --- + +{{< product-availability >}} + Use Observability Pipelines' Google Chronicle destination to send logs to Google Chronicle. The Observability Pipelines Worker uses standard Google authentication methods. See [Authentication methods at Google][3] for more information about choosing the authentication method for your use case. diff --git a/content/en/observability_pipelines/destinations/google_cloud_storage.md b/content/en/observability_pipelines/destinations/google_cloud_storage.md index e097243bb56..2b0bf471b7e 100644 --- a/content/en/observability_pipelines/destinations/google_cloud_storage.md +++ b/content/en/observability_pipelines/destinations/google_cloud_storage.md @@ -1,9 +1,14 @@ --- title: Google Cloud Storage Destination disable_toc: false +products: +- name: Logs + icon: logs --- -
For Worker versions 2.7 and later, the Google Cloud destination supports uniform bucket-level access. Google recommends using uniform bucket-level access.
For Worker version older than 2.7, only Access Control Lists is supported.
+{{< product-availability >}} + +
For Worker versions 2.7 and later, the Google Cloud Storage destination supports uniform bucket-level access, which Google recommends.
For Worker versions older than 2.7, only Access Control Lists are supported.
Use the Google Cloud Storage destination to send your logs to a Google Cloud Storage bucket. If you want to send logs to Google Cloud Storage for [archiving][1] and [rehydration][2], you must [configure Log Archives](#configure-log-archives). If you do not want to rehydrate logs in Datadog, skip to [Set up the destination for your pipeline](#set-up-the-destinations). diff --git a/content/en/observability_pipelines/destinations/google_pubsub.md b/content/en/observability_pipelines/destinations/google_pubsub.md index 394dbd15807..1f0d8650fc8 100644 --- a/content/en/observability_pipelines/destinations/google_pubsub.md +++ b/content/en/observability_pipelines/destinations/google_pubsub.md @@ -1,8 +1,13 @@ --- title: Google Pub/Sub Destination disable_toc: false +products: +- name: Logs + icon: logs --- +{{< product-availability >}} + ## Overview Use Observability Pipelines' Google Pub/Sub destination to publish logs to the Google Pub/Sub messaging system, so the logs can be sent to downstream services, data lakes, or custom applications. diff --git a/content/en/observability_pipelines/destinations/http_client.md b/content/en/observability_pipelines/destinations/http_client.md index 16c654e19d7..73955c889c0 100644 --- a/content/en/observability_pipelines/destinations/http_client.md +++ b/content/en/observability_pipelines/destinations/http_client.md @@ -1,8 +1,13 @@ --- title: HTTP Client Destination disable_toc: false +products: +- name: Logs + icon: logs --- +{{< product-availability >}} + ## Overview Use Observability Pipelines' HTTP Client destination to send logs to an HTTP client, such as a logging platform or SIEM. diff --git a/content/en/observability_pipelines/destinations/kafka.md b/content/en/observability_pipelines/destinations/kafka.md index 62bd8fdb254..4f1d77c94d5 100644 --- a/content/en/observability_pipelines/destinations/kafka.md +++ b/content/en/observability_pipelines/destinations/kafka.md @@ -1,8 +1,13 @@ --- title: Kafka Destination disable_toc: false +products: +- name: Logs + icon: logs --- +{{< product-availability >}} + ## Overview Use Observability Pipelines' Kafka destination to send logs to Kafka topics. diff --git a/content/en/observability_pipelines/destinations/microsoft_sentinel.md b/content/en/observability_pipelines/destinations/microsoft_sentinel.md index dce3e8bc3a9..808f3caba7d 100644 --- a/content/en/observability_pipelines/destinations/microsoft_sentinel.md +++ b/content/en/observability_pipelines/destinations/microsoft_sentinel.md @@ -1,8 +1,13 @@ --- title: Microsoft Sentinel Destination disable_toc: false +products: +- name: Logs + icon: logs --- +{{< product-availability >}} + Use Observability Pipelines' Microsoft Sentinel destination to send logs to Microsoft Sentinel. See [Logs Ingestion API][3] for API call limits in Microsoft Sentinel. ## Setup diff --git a/content/en/observability_pipelines/destinations/new_relic.md b/content/en/observability_pipelines/destinations/new_relic.md index 9d27054fc2d..0bccd9ddb88 100644 --- a/content/en/observability_pipelines/destinations/new_relic.md +++ b/content/en/observability_pipelines/destinations/new_relic.md @@ -1,8 +1,13 @@ --- title: New Relic Destination disable_toc: false +products: +- name: Logs + icon: logs --- +{{< product-availability >}} + Use Observability Pipelines' New Relic destination to send logs to New Relic. 
## Setup diff --git a/content/en/observability_pipelines/destinations/opensearch.md b/content/en/observability_pipelines/destinations/opensearch.md index d6ed99a386f..76b8f25b6ab 100644 --- a/content/en/observability_pipelines/destinations/opensearch.md +++ b/content/en/observability_pipelines/destinations/opensearch.md @@ -1,8 +1,13 @@ --- title: OpenSearch Destination disable_toc: false +products: +- name: Logs + icon: logs --- +{{< product-availability >}} + Use Observability Pipelines' OpenSearch destination to send logs to OpenSearch. ## Setup diff --git a/content/en/observability_pipelines/destinations/sentinelone.md b/content/en/observability_pipelines/destinations/sentinelone.md index e592736d50a..049f02badc3 100644 --- a/content/en/observability_pipelines/destinations/sentinelone.md +++ b/content/en/observability_pipelines/destinations/sentinelone.md @@ -5,8 +5,13 @@ further_reading: - link: "https://www.datadoghq.com/blog/observability-pipelines-sentinelone/" tag: "blog" text: "Optimize EDR logs and route them to SentinelOne with Observability Pipelines" +products: +- name: Logs + icon: logs --- +{{< product-availability >}} + Use Observability Pipelines' SentinelOne destination to send logs to SentinelOne. ## Setup diff --git a/content/en/observability_pipelines/destinations/socket.md b/content/en/observability_pipelines/destinations/socket.md index 700858d170b..4889cc7de9f 100644 --- a/content/en/observability_pipelines/destinations/socket.md +++ b/content/en/observability_pipelines/destinations/socket.md @@ -1,8 +1,13 @@ --- title: Socket Destination disable_toc: false +products: +- name: Logs + icon: logs --- +{{< product-availability >}} + Use Observability Pipelines' Socket destination to send logs to a socket endpoint. ## Setup diff --git a/content/en/observability_pipelines/destinations/splunk_hec.md b/content/en/observability_pipelines/destinations/splunk_hec.md index c6009813277..754bcc93d94 100644 --- a/content/en/observability_pipelines/destinations/splunk_hec.md +++ b/content/en/observability_pipelines/destinations/splunk_hec.md @@ -1,8 +1,13 @@ --- title: Splunk HTTP Event Collector (HEC) Destination disable_toc: false +products: +- name: Logs + icon: logs --- +{{< product-availability >}} + Use Observability Pipelines' Splunk HTTP Event Collector (HEC) destination to send logs to Splunk HEC. ## Setup diff --git a/content/en/observability_pipelines/destinations/sumo_logic_hosted_collector.md b/content/en/observability_pipelines/destinations/sumo_logic_hosted_collector.md index 6e60856ba87..82235576bc2 100644 --- a/content/en/observability_pipelines/destinations/sumo_logic_hosted_collector.md +++ b/content/en/observability_pipelines/destinations/sumo_logic_hosted_collector.md @@ -1,8 +1,13 @@ --- title: Sumo Logic Hosted Collector Destination disable_toc: false +products: +- name: Logs + icon: logs --- +{{< product-availability >}} + Use Observability Pipelines' Sumo Logic destination to send logs to your Sumo Logic Hosted Collector. ## Setup diff --git a/content/en/observability_pipelines/destinations/syslog.md b/content/en/observability_pipelines/destinations/syslog.md index 7597614f72e..6dfffece8cf 100644 --- a/content/en/observability_pipelines/destinations/syslog.md +++ b/content/en/observability_pipelines/destinations/syslog.md @@ -1,8 +1,13 @@ --- title: Syslog Destinations disable_toc: false +products: +- name: Logs + icon: logs --- +{{< product-availability >}} + Use Observability Pipelines' syslog destinations to send logs to rsyslog or syslog-ng. 
## Setup diff --git a/content/en/observability_pipelines/guide/environment_variables.md b/content/en/observability_pipelines/guide/environment_variables.md index 01e87af347f..32e2aec5d92 100644 --- a/content/en/observability_pipelines/guide/environment_variables.md +++ b/content/en/observability_pipelines/guide/environment_variables.md @@ -83,7 +83,10 @@ Some Observability Pipelines components require setting up environment variables ### CrowdStrike NG-SIEM {{% observability_pipelines/configure_existing_pipelines/destination_env_vars/crowdstrike_ng_siem %}} -### Datadog +### Datadog Logs +{{% observability_pipelines/configure_existing_pipelines/destination_env_vars/datadog %}} + +### Datadog Metrics {{% observability_pipelines/configure_existing_pipelines/destination_env_vars/datadog %}} ### Datadog Archives diff --git a/content/en/observability_pipelines/processors/_index.md b/content/en/observability_pipelines/processors/_index.md index 3c4a3839ff6..ba7e07b676a 100644 --- a/content/en/observability_pipelines/processors/_index.md +++ b/content/en/observability_pipelines/processors/_index.md @@ -20,9 +20,68 @@ Processor groups are executed from top to bottom. The order of the processors is Select a processor in the left navigation menu to see more information about it. +## Processors + +These are the available processors: + +{{< tabs >}} +{{% tab "Logs" %}} + +- [Add Environment Variables Processor][1] +- [Add Hostname Processor][2] +- [Custom Processor][3] +- [Deduplicate Processor][4] +- [Edit Fields Processor][5] +- [Enrichment Table Processor][6] +- [Filter Processor][7] +- [Generate Metrics Processor][8] +- [Grok Parser Processor][9] +- [Parse JSON Processor][10] +- [Parse XML Processor][11] +- [Quota Processor][12] +- [Reduce Processor][13] +- [Remap to OCSF Processor][14] +- [Sample Processor][15] +- [Sensitive Data Scanner Processor][16] +- [Split Array][17] +- [Tags][18] +- [Throttle][19] + +[1]: /observability_pipelines/processors/add_environment_variables/ +[2]: /observability_pipelines/processors/add_hostname/ +[3]: /observability_pipelines/processors/custom_processor/ +[4]: /observability_pipelines/processors/dedupe/ +[5]: /observability_pipelines/processors/edit_fields/ +[6]: /observability_pipelines/processors/enrichment_table/ +[7]: /observability_pipelines/processors/filter/ +[8]: /observability_pipelines/processors/generate_metrics/ +[9]: /observability_pipelines/processors/grok_parser/ +[10]: /observability_pipelines/processors/parse_json/ +[11]: /observability_pipelines/processors/parse_xml/ +[12]: /observability_pipelines/processors/quota/ +[13]: /observability_pipelines/processors/reduce/ +[14]: /observability_pipelines/processors/remap_ocsf/ +[15]: /observability_pipelines/processors/sample/ +[16]: /observability_pipelines/processors/sensitive_data_scanner/ +[17]: /observability_pipelines/processors/split_array/ +[18]: /observability_pipelines/processors/tags/ +[19]: /observability_pipelines/processors/throttle/ + +{{% /tab %}} +{{% tab "Metrics" %}} + +- [Filter][1] +- [Tag Control][2] + +[1]: /observability_pipelines/processors/filter/ +[2]: /observability_pipelines/processors/tag_control/ + +{{% /tab %}} +{{< /tabs >}} + ## Processor groups -
Configuring a pipeline with processor groups is only available for Worker versions 2.7 and later.
+
Configuring a pipeline with processor groups is only available for Worker versions 2.7 and later.
{{< img src="observability_pipelines/processors/processor_groups.png" alt="Your image description" style="width:100%;" >}} @@ -32,10 +91,6 @@ Processor groups and the processors within each group are executed from top to b **Note**: There is a limit of 10 processor groups for a pipeline canvas. For example, if you have a dual ship pipeline, where there are two destinations and each destination has its own set of processor groups, the combined number of processor groups from both sets is limited to 10. -{{% observability_pipelines/processors/filter_syntax %}} - -[1]: https://app.datadoghq.com/observability-pipelines - ## Further Reading -{{< partial name="whats-next/whats-next.html" >}} +{{< partial name="whats-next/whats-next.html" >}} \ No newline at end of file diff --git a/content/en/observability_pipelines/processors/add_environment_variables.md b/content/en/observability_pipelines/processors/add_environment_variables.md index 885eabbeba5..13f4244c325 100644 --- a/content/en/observability_pipelines/processors/add_environment_variables.md +++ b/content/en/observability_pipelines/processors/add_environment_variables.md @@ -1,8 +1,13 @@ --- title: Add Environment Variables Processor disable_toc: false +products: +- name: Logs + icon: logs --- +{{< product-availability >}} + {{% observability_pipelines/processors/add_env_vars %}} {{% observability_pipelines/processors/filter_syntax %}} diff --git a/content/en/observability_pipelines/processors/add_hostname.md b/content/en/observability_pipelines/processors/add_hostname.md index f746621a72b..e1f817a0140 100644 --- a/content/en/observability_pipelines/processors/add_hostname.md +++ b/content/en/observability_pipelines/processors/add_hostname.md @@ -1,8 +1,13 @@ --- title: Add Hostname Processor disable_toc: false +products: +- name: Logs + icon: logs --- +{{< product-availability >}} + {{% observability_pipelines/processors/add_hostname %}} {{% observability_pipelines/processors/filter_syntax %}} \ No newline at end of file diff --git a/content/en/observability_pipelines/processors/custom_processor.md b/content/en/observability_pipelines/processors/custom_processor.md index 4a4b14cef82..906c6a0c28a 100644 --- a/content/en/observability_pipelines/processors/custom_processor.md +++ b/content/en/observability_pipelines/processors/custom_processor.md @@ -5,8 +5,13 @@ further_reading: - link: "/observability_pipelines/guide/remap_reserved_attributes/" tag: "documentation" text: "Remap reserved attributes" +products: +- name: Logs + icon: logs --- +{{< product-availability >}} + {{% observability_pipelines/processors/custom_processor %}} ## Custom functions diff --git a/content/en/observability_pipelines/processors/dedupe.md b/content/en/observability_pipelines/processors/dedupe.md index 79ba2d5c34c..80da5d6ac36 100644 --- a/content/en/observability_pipelines/processors/dedupe.md +++ b/content/en/observability_pipelines/processors/dedupe.md @@ -1,8 +1,13 @@ --- title: Deduplicate Processor disable_toc: false +products: +- name: Logs + icon: logs --- +{{< product-availability >}} + {{% observability_pipelines/processors/dedupe %}} {{% observability_pipelines/processors/filter_syntax %}} \ No newline at end of file diff --git a/content/en/observability_pipelines/processors/edit_fields.md b/content/en/observability_pipelines/processors/edit_fields.md index a6ba51a6995..4ff64bee9f2 100644 --- a/content/en/observability_pipelines/processors/edit_fields.md +++ b/content/en/observability_pipelines/processors/edit_fields.md @@ -5,8 +5,13 @@ 
further_reading: - link: "/observability_pipelines/guide/remap_reserved_attributes/" tag: "documentation" text: "Remap reserved attributes" +products: +- name: Logs + icon: logs --- +{{< product-availability >}} + {{% observability_pipelines/processors/remap %}} {{% observability_pipelines/processors/filter_syntax %}} diff --git a/content/en/observability_pipelines/processors/enrichment_table.md b/content/en/observability_pipelines/processors/enrichment_table.md index 6c211147f52..58bae8ea646 100644 --- a/content/en/observability_pipelines/processors/enrichment_table.md +++ b/content/en/observability_pipelines/processors/enrichment_table.md @@ -1,8 +1,13 @@ --- title: Enrichment Table Processor disable_toc: false +products: +- name: Logs + icon: logs --- +{{< product-availability >}} + {{% observability_pipelines/processors/enrichment_table %}} {{% observability_pipelines/processors/filter_syntax %}} \ No newline at end of file diff --git a/content/en/observability_pipelines/processors/filter.md b/content/en/observability_pipelines/processors/filter.md index ab983cabbed..f60efd8cb63 100644 --- a/content/en/observability_pipelines/processors/filter.md +++ b/content/en/observability_pipelines/processors/filter.md @@ -8,12 +8,62 @@ further_reading: - link: /logs/explorer/search_syntax/ tag: "Documentation" text: Log Management Search Syntax +products: +- name: Logs + icon: logs +- name: Metrics + icon: metrics --- -{{% observability_pipelines/processors/filter %}} +{{< product-availability >}} -{{% observability_pipelines/processors/filter_syntax %}} +## Overview + +This processor drops all logs or metrics ({{< tooltip glossary="preview" case="title" >}}) that do not match the specified filter query. If a log or metric is dropped, the data isn't sent to any subsequent processors or destinations. + +## Setup + +To set up the filter processor: + +- Define a **filter query**. + - Logs or metrics that match the [query](#filter-query-syntax) are sent to the next component. + - Logs or metrics that don't match the query are dropped. + +## Filter query syntax + +Each processor has a corresponding filter query in its fields. Processors only process logs or metrics that match their filter query. + +The following are filter query examples: + +{{< tabs >}} +{{% tab "Logs" %}} + +- `NOT (status:debug)`: This filters for logs that do not have the status `DEBUG`. +- `status:ok service:flask-web-app`: This filters for all logs with the status `OK` from your `flask-web-app` service. + - This query can also be written as: `status:ok AND service:flask-web-app`. +- `host:COMP-A9JNGYK OR host:COMP-J58KAS`: This filter query only matches logs from the labeled hosts. +- `user.status:inactive`: This filters for logs with the status `inactive` nested under the `user` attribute. +- `http.status:[200 TO 299]` or `http.status:{300 TO 399}`: These two filters represent the syntax to query a range for `http.status`. Ranges can be used across any attribute. + +Learn more about writing log filter queries in [Log Search Syntax][1]. + +[1]: /observability_pipelines/search_syntax/logs/ + +{{% /tab %}} + +{{% tab "Metrics" %}} + +- `NOT system.cpu.user`: This filters for metrics that do not have the field `name:system.cpu.user`. +- `system.cpu.user OR system.cpu.user.total`: This filter query only matches metrics that have either `name:system.cpu.user` or `name:system.cpu.user.total`. +- `tags:(env\:prod OR env\:test)`: This filters for metrics with `env:prod` or `env:test` in `tags`. 
+ +Learn more about writing metrics filter queries in [Metrics Search Syntax][1]. + +[1]: /observability_pipelines/search_syntax/metrics/ + +{{% /tab %}} +{{< /tabs >}} ## Further reading -{{< partial name="whats-next/whats-next.html" >}} \ No newline at end of file +{{< partial name="whats-next/whats-next.html" >}} diff --git a/content/en/observability_pipelines/processors/generate_metrics.md b/content/en/observability_pipelines/processors/generate_metrics.md index efcf2183087..c7550163e08 100644 --- a/content/en/observability_pipelines/processors/generate_metrics.md +++ b/content/en/observability_pipelines/processors/generate_metrics.md @@ -1,8 +1,13 @@ --- -title: Generate Metrics Processor +title: Generate Log-based Metrics Processor disable_toc: false +products: +- name: Logs + icon: logs --- +{{< product-availability >}} + {{% observability_pipelines/processors/generate_metrics %}} {{% observability_pipelines/processors/filter_syntax %}} \ No newline at end of file diff --git a/content/en/observability_pipelines/processors/grok_parser.md b/content/en/observability_pipelines/processors/grok_parser.md index ef55241e119..8faeaabfcb8 100644 --- a/content/en/observability_pipelines/processors/grok_parser.md +++ b/content/en/observability_pipelines/processors/grok_parser.md @@ -1,8 +1,13 @@ --- title: Grok Parser Processor disable_toc: false +products: +- name: Logs + icon: logs --- +{{< product-availability >}} + {{% observability_pipelines/processors/grok_parser %}} {{% observability_pipelines/processors/filter_syntax %}} \ No newline at end of file diff --git a/content/en/observability_pipelines/processors/parse_json.md b/content/en/observability_pipelines/processors/parse_json.md index ff223e59dec..aaf7aed9ebf 100644 --- a/content/en/observability_pipelines/processors/parse_json.md +++ b/content/en/observability_pipelines/processors/parse_json.md @@ -1,8 +1,13 @@ --- title: Parse JSON Processor disable_toc: false +products: +- name: Logs + icon: logs --- +{{< product-availability >}} + {{% observability_pipelines/processors/parse_json %}} {{% observability_pipelines/processors/filter_syntax %}} \ No newline at end of file diff --git a/content/en/observability_pipelines/processors/parse_xml.md b/content/en/observability_pipelines/processors/parse_xml.md index 0df579a9ca9..86285f8adfc 100644 --- a/content/en/observability_pipelines/processors/parse_xml.md +++ b/content/en/observability_pipelines/processors/parse_xml.md @@ -5,8 +5,13 @@ further_reading: - link: "https://www.datadoghq.com/blog/observability-pipelines-parsing-xml-logs/" tag: "Blog" text: "Simplify XML log collection and processing with Observability Pipelines" +products: +- name: Logs + icon: logs --- +{{< product-availability >}} + {{% observability_pipelines/processors/parse_xml %}} {{% observability_pipelines/processors/filter_syntax %}} diff --git a/content/en/observability_pipelines/processors/quota.md b/content/en/observability_pipelines/processors/quota.md index 1ab54963831..a76d7f0f6cf 100644 --- a/content/en/observability_pipelines/processors/quota.md +++ b/content/en/observability_pipelines/processors/quota.md @@ -1,8 +1,13 @@ --- title: Quota Processor disable_toc: false +products: +- name: Logs + icon: logs --- +{{< product-availability >}} + {{% observability_pipelines/processors/quota %}} {{% observability_pipelines/processors/filter_syntax %}} \ No newline at end of file diff --git a/content/en/observability_pipelines/processors/reduce.md b/content/en/observability_pipelines/processors/reduce.md index 
cb51e0afd34..b6e8834a452 100644 --- a/content/en/observability_pipelines/processors/reduce.md +++ b/content/en/observability_pipelines/processors/reduce.md @@ -1,8 +1,13 @@ --- title: Reduce Processor disable_toc: false +products: +- name: Logs + icon: logs --- +{{< product-availability >}} + {{% observability_pipelines/processors/reduce %}} {{% observability_pipelines/processors/filter_syntax %}} \ No newline at end of file diff --git a/content/en/observability_pipelines/processors/remap_ocsf.md b/content/en/observability_pipelines/processors/remap_ocsf.md index 86b86f897a9..6ce486573e2 100644 --- a/content/en/observability_pipelines/processors/remap_ocsf.md +++ b/content/en/observability_pipelines/processors/remap_ocsf.md @@ -1,8 +1,13 @@ --- title: Remap to OCSF Processor disable_toc: false +products: +- name: Logs + icon: logs --- +{{< product-availability >}} + {{% observability_pipelines/processors/remap_ocsf %}} {{% collapse-content title="Library mapping" level="h5" expanded=false id="library_mapping" %}} diff --git a/content/en/observability_pipelines/processors/sample.md b/content/en/observability_pipelines/processors/sample.md index 9408fa2366e..0548eb08277 100644 --- a/content/en/observability_pipelines/processors/sample.md +++ b/content/en/observability_pipelines/processors/sample.md @@ -1,8 +1,13 @@ --- title: Sample Processor disable_toc: false +products: +- name: Logs + icon: logs --- +{{< product-availability >}} + {{% observability_pipelines/processors/sample %}} {{% observability_pipelines/processors/filter_syntax %}} \ No newline at end of file diff --git a/content/en/observability_pipelines/processors/sensitive_data_scanner.md b/content/en/observability_pipelines/processors/sensitive_data_scanner.md index 84a30d800e2..d9cb30fc4ad 100644 --- a/content/en/observability_pipelines/processors/sensitive_data_scanner.md +++ b/content/en/observability_pipelines/processors/sensitive_data_scanner.md @@ -1,8 +1,13 @@ --- title: Sensitive Data Scanner Processor disable_toc: false +products: +- name: Logs + icon: logs --- +{{< product-availability >}} + {{% observability_pipelines/processors/sensitive_data_scanner %}} {{% collapse-content title="Add rules from the library" level="h5" %}} diff --git a/content/en/observability_pipelines/processors/split_array.md b/content/en/observability_pipelines/processors/split_array.md index 7878c436fc9..b4a2f16774b 100644 --- a/content/en/observability_pipelines/processors/split_array.md +++ b/content/en/observability_pipelines/processors/split_array.md @@ -1,8 +1,13 @@ --- title: Split Array Processor disable_toc: false +products: +- name: Logs + icon: logs --- +{{< product-availability >}} + {{% observability_pipelines/processors/split_array %}} {{% observability_pipelines/processors/filter_syntax %}} \ No newline at end of file diff --git a/content/en/observability_pipelines/processors/tag_control.md b/content/en/observability_pipelines/processors/tag_control.md new file mode 100644 index 00000000000..5f42d4124f0 --- /dev/null +++ b/content/en/observability_pipelines/processors/tag_control.md @@ -0,0 +1,53 @@ +--- +title: Tag Control +description: Learn how to use the Tag Control processor for metrics. +disable_toc: false +products: +- name: Metrics + icon: metrics +--- + +{{< product-availability >}} + +## Overview + +Use this processor to include or exclude specific tags in your metrics. Tags that are excluded or not included are dropped, which can reduce your outbound metrics tag volume.
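+ +For example, suppose a metric arrives with the sample tags object below and you create an **Include tags** rule for the keys `env` and `service` (the tag keys and values here are hypothetical): + +``` +{"env":"prod", "service":"auth-api", "pod_name":"auth-7f9c", "debug_id":"a1b2"} +``` + +Only `env:prod` and `service:auth-api` are kept; `pod_name` and `debug_id` are dropped before the metric is sent to its destination.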
+ +The following tags can't be dropped because they provide specific platform functionality: + +- `host` +- `service` +- `ddsource` +- `function_arn` +- `datadog_` +- `_dd.*` + +## Setup + +To set up the processor: + +Click **Add tag rule**. +- If you haven't added any rules yet, enter the rule details as described in the [Add a tag rule](#add-a-tag-rule) section to create a rule. +- If you have already added rules, you can: + - Click on a rule in the table to edit or delete it. + - Use the search bar to find a specific rule by rule query, tag rule type, or tags applied, and then select the rule to edit or delete it. + - Click **New Tag Rule** to add a rule. + +### Add a tag rule + +{{< img src="observability_pipelines/processors/tag_control_settings.png" alt="The tag control settings panel" style="width:55%;" >}} + +1. Define a filter query. Only matching metrics are processed by this processor, but all metrics continue to the next step in the pipeline. See [Filter query syntax](#filter-query-syntax) for more information. +1. In the **Configure tags** section, choose whether to **Include tags** or **Exclude tags**. +1. Optionally, input a sample tags object to help you select the tags you want to include or exclude in the next step. + - The supported input format is `{"key1":"value1", "key2":"value2"}`. + - See [Define Tags][1] for more information about the `key:value` format. +1. If you provided a sample tags object in the previous step, select the tag keys you want to configure in the dropdown menu. You can also manually add tag keys. + - Note: You can select up to 15 tags. +1. Click **Save**. + +## Filter query syntax + +{{% observability_pipelines/processors/filter_syntax_metrics %}} + +[1]: /getting_started/tagging/#define-tags \ No newline at end of file diff --git a/content/en/observability_pipelines/processors/tags.md b/content/en/observability_pipelines/processors/tags.md index df2f4387147..2c3b96dd737 100644 --- a/content/en/observability_pipelines/processors/tags.md +++ b/content/en/observability_pipelines/processors/tags.md @@ -1,8 +1,13 @@ --- title: Tags Processor disable_toc: false +products: +- name: Logs + icon: logs --- +{{< product-availability >}} + {{% observability_pipelines/processors/tags_processor %}} {{% observability_pipelines/processors/filter_syntax %}} \ No newline at end of file diff --git a/content/en/observability_pipelines/processors/throttle.md b/content/en/observability_pipelines/processors/throttle.md index a133e5359c3..b06da1c6d25 100644 --- a/content/en/observability_pipelines/processors/throttle.md +++ b/content/en/observability_pipelines/processors/throttle.md @@ -1,8 +1,13 @@ --- title: Throttle Processor disable_toc: false +products: +- name: Logs + icon: logs --- +{{< product-availability >}} + {{% observability_pipelines/processors/throttle %}} {{% observability_pipelines/processors/filter_syntax %}} \ No newline at end of file diff --git a/content/en/observability_pipelines/sources/_index.md b/content/en/observability_pipelines/sources/_index.md index 25111a62031..453317e5cbf 100644 --- a/content/en/observability_pipelines/sources/_index.md +++ b/content/en/observability_pipelines/sources/_index.md @@ -19,6 +19,63 @@ Use Observability Pipelines' sources to receive logs or metrics ({{< tooltip glo Select a source in the left navigation menu to see more information about it.
+## Sources + +These are the available sources: + +{{< tabs >}} +{{% tab "Logs" %}} + +- [Amazon Data Firehose][1] +- [Amazon S3][2] +- [Azure Event Hubs][3] +- [Datadog Agent][4] +- [Filebeat][5] +- [Fluentd and Fluent Bit][6] +- [Google Pub/Sub][7] +- [HTTP Client][8] +- [HTTP Server][9] +- [Kafka][10] +- [Lambda Extension][11] +- [Lambda Forwarder][12] +- [Logstash][13] +- [OpenTelemetry][14] +- [Socket][15] +- [Splunk HTTP Event Collector (HEC)][16] +- [Splunk Heavy or Universal Forwarders (TCP)][17] +- [Sumo Logic Hosted Collector][18] +- [Syslog][19] + +[1]: /observability_pipelines/sources/amazon_data_firehose/ +[2]: /observability_pipelines/sources/amazon_s3/ +[3]: /observability_pipelines/sources/azure_event_hubs/ +[4]: /observability_pipelines/sources/datadog_agent/ +[5]: /observability_pipelines/sources/filebeat/ +[6]: /observability_pipelines/sources/fluent/ +[7]: /observability_pipelines/sources/google_pubsub/ +[8]: /observability_pipelines/sources/http_client/ +[9]: /observability_pipelines/sources/http_server/ +[10]: /observability_pipelines/sources/kafka/ +[11]: /observability_pipelines/sources/lambda_extension/ +[12]: /observability_pipelines/sources/lambda_forwarder/ +[13]: /observability_pipelines/sources/logstash/ +[14]: /observability_pipelines/sources/opentelemetry/ +[15]: /observability_pipelines/sources/socket/ +[16]: /observability_pipelines/sources/splunk_hec/ +[17]: /observability_pipelines/sources/splunk_tcp/ +[18]: /observability_pipelines/sources/sumo_logic/ +[19]: /observability_pipelines/sources/syslog/ + +{{% /tab %}} +{{% tab "Metrics" %}} + +- [Datadog Agent][1] + +[1]: /observability_pipelines/sources/datadog_agent/ + +{{% /tab %}} +{{< /tabs >}} + ## Standard metadata fields All sources add the following standard metadata fields to ingested events: diff --git a/content/en/observability_pipelines/sources/amazon_data_firehose.md b/content/en/observability_pipelines/sources/amazon_data_firehose.md index 515e29f0d6a..7c774cd2397 100644 --- a/content/en/observability_pipelines/sources/amazon_data_firehose.md +++ b/content/en/observability_pipelines/sources/amazon_data_firehose.md @@ -1,8 +1,13 @@ --- title: Amazon Data Firehose Source disable_toc: false +products: +- name: Logs + icon: logs --- +{{< product-availability >}} + Use Observability Pipelines' Amazon Data Firehose source to receive logs from Amazon Data Firehose. Select and set up this source when you [set up a pipeline][1]. ## Prerequisites diff --git a/content/en/observability_pipelines/sources/azure_event_hubs.md b/content/en/observability_pipelines/sources/azure_event_hubs.md index 892e0809853..f3d06283c35 100644 --- a/content/en/observability_pipelines/sources/azure_event_hubs.md +++ b/content/en/observability_pipelines/sources/azure_event_hubs.md @@ -1,8 +1,13 @@ --- title: Send Azure Event Hubs Logs to Observability Pipelines disable_toc: false +products: +- name: Logs + icon: logs --- +{{< product-availability >}} + ## Overview This document walks through how to send Azure Event Hubs logs to Observability Pipelines using the Kafka source. 
The setup steps include setting up Azure Event Hubs for the Kafka source: diff --git a/content/en/observability_pipelines/sources/datadog_agent.md b/content/en/observability_pipelines/sources/datadog_agent.md index 2c10d3a043e..9a907aa1b1f 100644 --- a/content/en/observability_pipelines/sources/datadog_agent.md +++ b/content/en/observability_pipelines/sources/datadog_agent.md @@ -1,17 +1,22 @@ --- title: Datadog Agent Source disable_toc: false - +products: +- name: Logs + icon: logs +- name: Metrics + icon: metrics further_reading: - link: https://www.datadoghq.com/blog/manage-metrics-cost-control-with-observability-pipelines tag: Blog text: Manage metric volume and tags in your environment with Observability Pipelines - --- -Use Observability Pipelines' Datadog Agent source to receive logs from the Datadog Agent. Select and set up this source when you [set up a pipeline][1]. +{{< product-availability >}} + +Use Observability Pipelines' Datadog Agent source to receive logs or metrics ({{< tooltip glossary="preview" case="title" >}}) from the Datadog Agent. Select and set up this source when you [set up a pipeline][1]. -**Note**: If you are using the Datadog Distribution of OpenTelemetry (DDOT) Collector, you must [use the OpenTelemetry source to send logs to Observability Pipelines][4]. +**Note**: If you are using the Datadog Distribution of OpenTelemetry (DDOT) Collector for logs, you must [use the OpenTelemetry source to send logs to Observability Pipelines][4]. ## Prerequisites @@ -27,20 +32,87 @@ Use Observability Pipelines' Datadog Agent source to receive logs from the Datad ## Connect the Datadog Agent to the Observability Pipelines Worker +{{< tabs >}} +{{% tab "Logs" %}} + Use the Agent configuration file or the Agent Helm chart values file to connect the Datadog Agent to the Observability Pipelines Worker. -**Note**: If your Agent is running in a Docker container, you must exclude Observability Pipelines logs using the `DD_CONTAINER_EXCLUDE_LOGS` environment variable. For Helm, use `datadog.containerExcludeLogs`. This prevents duplicate logs, as the Worker also sends its own logs directly to Datadog. See [Docker Log Collection][2] or [Setting environment variables for Helm][3] for more information. +**Note**: If your Agent is running in a Docker container, you must exclude Observability Pipelines logs using the `DD_CONTAINER_EXCLUDE_LOGS` environment variable. For Helm, use `datadog.containerExcludeLogs`. This prevents duplicate logs, as the Worker also sends its own logs directly to Datadog. See [Docker Log Collection][1] or [Setting environment variables for Helm][2] for more information. -{{< tabs >}} -{{% tab "Agent configuration file" %}} +{{% collapse-content title="Agent configuration file" level="h4" expanded=false id="agent-config-file-logs" %}} {{% observability_pipelines/log_source_configuration/datadog_agent %}} -{{% /tab %}} -{{% tab "Agent Helm values file" %}} +{{% /collapse-content %}} + +{{% collapse-content title="Agent Helm values file" level="h4" expanded=false id="agent-helm-values-logs" %}} {{% observability_pipelines/log_source_configuration/datadog_agent_kubernetes %}} +{{% /collapse-content %}} + +[1]: /containers/docker/log/?tab=containerinstallation#linux +[2]: /containers/guide/container-discovery-management/?tab=helm#setting-environment-variables + +{{% /tab %}} + +{{% tab "Metrics" %}} + +Use the Agent configuration file or the Agent Helm chart values file to connect the Datadog Agent to the Observability Pipelines Worker.
+ +**Note**: If your Agent is running in a Docker container, you must exclude Observability Pipelines metrics, such as utilization and events in/out metrics, using the `DD_CONTAINER_EXCLUDE_METRICS` environment variable. For Helm, use `datadog.containerExcludeMetrics`. This prevents duplicate metrics, as the Worker also sends its own metrics directly to Datadog. See [Docker Metrics Collection][1] or [Setting environment variables for Helm][2] for more information. + +{{% collapse-content title="Agent configuration file" level="h4" expanded=false id="agent-config-file-metrics" %}} + +To send Datadog Agent metrics to the Observability Pipelines Worker, update your [Agent configuration file][1] with the following: + +``` +observability_pipelines_worker: + metrics: + enabled: true + url: "http://<OPW_HOST>:8383" +``` + +`<OPW_HOST>` is the host IP address or the load balancer URL associated with the Observability Pipelines Worker. +- For CloudFormation installs, use the `LoadBalancerDNS` CloudFormation output for the URL. +- For Kubernetes installs, you can use the internal DNS record of the Observability Pipelines Worker service. For example: `http://opw-observability-pipelines-worker.default.svc.cluster.local:<PORT>`. + +**Note**: If the Worker is listening for logs on port 8282, you must use another port for metrics, such as 8383. + +After you restart the Agent, your observability data should be flowing to the Worker, processed by the pipeline, and delivered to Datadog. + +[1]: https://github.com/DataDog/datadog-agent/blob/main/pkg/config/config_template.yaml + +{{% /collapse-content %}} + +{{% collapse-content title="Agent Helm values file" level="h4" expanded=false id="agent-helm-values-metrics" %}} + +To send Datadog Agent metrics to the Observability Pipelines Worker, update your Datadog Helm chart [datadog-values.yaml][1] with the following environment variables. See [Agent Environment Variables][2] for more information. + +``` +datadog: + env: + - name: DD_OBSERVABILITY_PIPELINES_WORKER_METRICS_ENABLED + value: "true" + - name: DD_OBSERVABILITY_PIPELINES_WORKER_METRICS_URL + value: "http://<OPW_HOST>:8383" +``` + +`<OPW_HOST>` is the host IP address or the load balancer URL associated with the Observability Pipelines Worker. +- For Kubernetes installs, you can use the internal DNS record of the Observability Pipelines Worker service. For example: `http://opw-observability-pipelines-worker.default.svc.cluster.local:<PORT>`. + +**Note**: If the Worker is listening for logs on port 8282, you must use another port for metrics, such as 8383.
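+ +After you update the values file, redeploy the Agent so the new environment variables take effect. A minimal sketch of the upgrade command is shown below; the release name `datadog-agent` is an example and should match your own Helm release: + +``` +helm upgrade datadog-agent -f datadog-values.yaml datadog/datadog +``` + +After the Agent restarts, your metrics should be flowing to the Worker, processed by the pipeline, and delivered to Datadog.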
+ +[1]: https://github.com/DataDog/helm-charts/blob/main/charts/datadog/values.yaml +[2]: https://docs.datadoghq.com/agent/guide/environment-variables/ + +{{% /collapse-content %}} + +[1]: /containers/docker/data_collected/ +[2]: /containers/guide/container-discovery-management/?tab=helm#setting-environment-variables + {{% /tab %}} {{< /tabs >}} @@ -49,6 +121,4 @@ Use the Agent configuration file or the Agent Helm chart values file to connect {{< partial name="whats-next/whats-next.html" >}} [1]: /observability_pipelines/configuration/set_up_pipelines/ -[2]: /containers/docker/log/?tab=containerinstallation#linux -[3]: /containers/guide/container-discovery-management/?tab=helm#setting-environment-variables [4]: /observability_pipelines/sources/opentelemetry/#send-logs-from-the-datadog-distribution-of-opentelemetry-collector-to-observability-pipelines \ No newline at end of file diff --git a/content/en/observability_pipelines/sources/filebeat.md b/content/en/observability_pipelines/sources/filebeat.md index bb3c2f24fbf..d739470858d 100644 --- a/content/en/observability_pipelines/sources/filebeat.md +++ b/content/en/observability_pipelines/sources/filebeat.md @@ -1,8 +1,13 @@ --- title: Send Logs to Observability Pipelines with Filebeat disable_toc: false +products: +- name: Logs + icon: logs --- +{{< product-availability >}} + ## Overview Use the Logstash source to send logs to Observability Pipelines with Filebeat. diff --git a/content/en/observability_pipelines/sources/fluent.md b/content/en/observability_pipelines/sources/fluent.md index 575d24479e1..f46a4e58805 100644 --- a/content/en/observability_pipelines/sources/fluent.md +++ b/content/en/observability_pipelines/sources/fluent.md @@ -1,8 +1,13 @@ --- title: Fluentd and Fluent Bit Sources disable_toc: false +products: +- name: Logs + icon: logs --- +{{< product-availability >}} + Use Observability Pipelines' Fluentd or Fluent Bit source to receive logs from your Fluentd or Fluent Bit agent. Select and set up this source when you [set up a pipeline][1]. ## Prerequisites diff --git a/content/en/observability_pipelines/sources/google_pubsub.md b/content/en/observability_pipelines/sources/google_pubsub.md index 9d48b139dd8..2feaed036c0 100644 --- a/content/en/observability_pipelines/sources/google_pubsub.md +++ b/content/en/observability_pipelines/sources/google_pubsub.md @@ -1,8 +1,13 @@ --- title: Google Pub/Sub Source disable_toc: false +products: +- name: Logs + icon: logs --- +{{< product-availability >}} + Use Observability Pipelines' Google Pub/Sub source to pull logs from the Google Cloud Pub/Sub messaging system. Select and set up this source when you [set up a pipeline][1]. ## Prerequisites diff --git a/content/en/observability_pipelines/sources/http_client.md b/content/en/observability_pipelines/sources/http_client.md index e26c2725422..2a5f201524d 100644 --- a/content/en/observability_pipelines/sources/http_client.md +++ b/content/en/observability_pipelines/sources/http_client.md @@ -1,8 +1,13 @@ --- title: HTTP Client Source disable_toc: false +products: +- name: Logs + icon: logs --- +{{< product-availability >}} + Use Observability Pipelines' HTTP/S Client source to pull logs from the upstream HTTP/S server. Select and set up this source when you [set up a pipeline][1].
## Prerequisites diff --git a/content/en/observability_pipelines/sources/http_server.md b/content/en/observability_pipelines/sources/http_server.md index 33796d7dfd0..4984c58b4b6 100644 --- a/content/en/observability_pipelines/sources/http_server.md +++ b/content/en/observability_pipelines/sources/http_server.md @@ -1,8 +1,13 @@ --- title: HTTP Server Source disable_toc: false +products: +- name: Logs + icon: logs --- +{{< product-availability >}} + Use Observability Pipelines' HTTP/S Server source to collect HTTP client logs. Select and set up this source when you [set up a pipeline][1]. You can also [send AWS vended logs with Datadog Lambda Forwarder to Observability Pipelines](#send-aws-vended-logs-with-the-datadog-lambda-forwarder-to-observability-pipelines). diff --git a/content/en/observability_pipelines/sources/kafka.md b/content/en/observability_pipelines/sources/kafka.md index f64aaf9b7cb..d312d94e368 100644 --- a/content/en/observability_pipelines/sources/kafka.md +++ b/content/en/observability_pipelines/sources/kafka.md @@ -1,8 +1,13 @@ --- title: Kafka Source disable_toc: false +products: +- name: Logs + icon: logs --- +{{< product-availability >}} + Use Observability Pipelines' Kafka source to receive logs from your Kafka topics. Select and set up this source when you [set up a pipeline][1]. The Kafka source uses [librdkafka][2]. You can also [send Azure Event Hubs logs to Observability Pipelines using the Kafka source](/observability_pipelines/sources/azure_event_hubs/#send-azure-event-hub-logs-to-observability-pipelines-using-the-kafka-source). diff --git a/content/en/observability_pipelines/sources/lambda_extension.md b/content/en/observability_pipelines/sources/lambda_extension.md index 909803bcfee..4cbb9779c26 100644 --- a/content/en/observability_pipelines/sources/lambda_extension.md +++ b/content/en/observability_pipelines/sources/lambda_extension.md @@ -2,9 +2,13 @@ title: Send Datadog Lambda Extension Logs to Observability Pipelines description: Learn how to send Lambda Extension logs to Observability Pipelines disable_toc: false - +products: +- name: Logs + icon: logs --- +{{< product-availability >}} + This document describes how to use the Datadog Lambda Extension to send AWS vended logs to Observability Pipelines. The setup steps are: - [Set up a pipeline with the HTTP/S Server source](#set-up-a-pipeline). diff --git a/content/en/observability_pipelines/sources/lambda_forwarder.md b/content/en/observability_pipelines/sources/lambda_forwarder.md index 14ea0ad3e40..7fa8736d0f5 100644 --- a/content/en/observability_pipelines/sources/lambda_forwarder.md +++ b/content/en/observability_pipelines/sources/lambda_forwarder.md @@ -1,8 +1,13 @@ --- title: Send Datadog Lambda Forwarder Logs to Observability Pipelines disable_toc: false +products: +- name: Logs + icon: logs --- +{{< product-availability >}} + This document walks through how to send AWS vended logs with the Datadog Lambda Forwarder to Observability Pipelines. The setup steps are: - [Set up a pipeline with the HTTP/S Server source](#set-up-a-pipeline).
diff --git a/content/en/observability_pipelines/sources/logstash.md b/content/en/observability_pipelines/sources/logstash.md index d731c1c5c82..3ab92531698 100644 --- a/content/en/observability_pipelines/sources/logstash.md +++ b/content/en/observability_pipelines/sources/logstash.md @@ -1,8 +1,13 @@ --- title: Logstash Source disable_toc: false +products: +- name: Logs + icon: logs --- +{{< product-availability >}} + Use Observability Pipelines' Logstash source to receive logs from your Logstash agent. Select and set up this source when you [set up a pipeline][1]. You can also use the Logstash source to [send logs to Observability Pipelines using Filebeat][2]. diff --git a/content/en/observability_pipelines/sources/opentelemetry.md b/content/en/observability_pipelines/sources/opentelemetry.md index de3017129c5..a4146d976fe 100644 --- a/content/en/observability_pipelines/sources/opentelemetry.md +++ b/content/en/observability_pipelines/sources/opentelemetry.md @@ -1,14 +1,17 @@ --- title: OpenTelemetry Source disable_toc: false - further_reading: - link: https://www.datadoghq.com/blog/manage-metrics-cost-control-with-observability-pipelines tag: Blog text: Manage metric volume and tags in your environment with Observability Pipelines - +products: +- name: Logs + icon: logs --- +{{< product-availability >}} + ## Overview Use Observability Pipelines' OpenTelemetry (OTel) source to collect logs from your OTel Collector through HTTP or gRPC. Select and set up this source when you set up a pipeline. The information below is configured in the pipelines UI. diff --git a/content/en/observability_pipelines/sources/socket.md b/content/en/observability_pipelines/sources/socket.md index 413028dabaf..63f0dadcf61 100644 --- a/content/en/observability_pipelines/sources/socket.md +++ b/content/en/observability_pipelines/sources/socket.md @@ -1,8 +1,13 @@ --- title: Socket Source disable_toc: false +products: +- name: Logs + icon: logs --- +{{< product-availability >}} + Use Observability Pipelines' Socket source to send logs to the Worker over a socket connection (TCP or UDP). Select and set up this source when you [set up a pipeline][1]. ## Prerequisites diff --git a/content/en/observability_pipelines/sources/splunk_hec.md b/content/en/observability_pipelines/sources/splunk_hec.md index 9ef32ba8b93..489e7bc6296 100644 --- a/content/en/observability_pipelines/sources/splunk_hec.md +++ b/content/en/observability_pipelines/sources/splunk_hec.md @@ -1,8 +1,13 @@ --- title: Splunk HTTP Event Collector (HEC) Source disable_toc: false +products: +- name: Logs + icon: logs --- +{{< product-availability >}} + Use Observability Pipelines' Splunk HTTP Event Collector (HEC) source to receive logs from your Splunk HEC. Select and set up this source when you [set up a pipeline][1]. **Note**: Use the Splunk HEC source if you want to [send logs from the Splunk Distribution of the OpenTelemetry Collector to Observability Pipelines](#send-logs-from-the-splunk-distribution-of-the-opentelemetry-collector-to-observability-pipelines). 
diff --git a/content/en/observability_pipelines/sources/splunk_tcp.md b/content/en/observability_pipelines/sources/splunk_tcp.md index d8b657c24c8..db7ef5c294c 100644 --- a/content/en/observability_pipelines/sources/splunk_tcp.md +++ b/content/en/observability_pipelines/sources/splunk_tcp.md @@ -1,8 +1,13 @@ --- title: Splunk Heavy or Universal Forwarders (TCP) Source disable_toc: false +products: +- name: Logs + icon: logs --- +{{< product-availability >}} + Use Observability Pipelines' Splunk Heavy and Universal Forwarders (TCP) source to receive logs sent to your Splunk forwarders. Select and set up this source when you [set up a pipeline][1]. ## Prerequisites diff --git a/content/en/observability_pipelines/sources/sumo_logic.md b/content/en/observability_pipelines/sources/sumo_logic.md index f6562ec513f..02267ccda45 100644 --- a/content/en/observability_pipelines/sources/sumo_logic.md +++ b/content/en/observability_pipelines/sources/sumo_logic.md @@ -1,8 +1,13 @@ --- title: Sumo Logic Hosted Collector disable_toc: false +products: +- name: Logs + icon: logs --- +{{< product-availability >}} + Use Observability Pipelines' Sumo Logic Hosted Collector source to receive logs sent to your Sumo Logic Hosted Collector. Select and set up this source when you [set up a pipeline][1]. ## Prerequisites diff --git a/content/en/observability_pipelines/sources/syslog.md b/content/en/observability_pipelines/sources/syslog.md index ce22a981080..9c3bf074eec 100644 --- a/content/en/observability_pipelines/sources/syslog.md +++ b/content/en/observability_pipelines/sources/syslog.md @@ -1,8 +1,13 @@ --- title: Syslog Source disable_toc: false +products: +- name: Logs + icon: logs --- +{{< product-availability >}} + Use Observability Pipelines' rsyslog or syslog-ng source to receive logs sent to rsyslog or syslog-ng. Select and set up this source when you [set up a pipeline][1]. You can also [forward third-party logs to syslog](#forward-third-party-logs-to-syslog) and then send them to the Observability Pipelines Worker. diff --git a/layouts/shortcodes/observability_pipelines/prerequisites/datadog_agent.en.md b/layouts/shortcodes/observability_pipelines/prerequisites/datadog_agent.en.md index 7eb2a3f8c45..5a77fd8b871 100644 --- a/layouts/shortcodes/observability_pipelines/prerequisites/datadog_agent.en.md +++ b/layouts/shortcodes/observability_pipelines/prerequisites/datadog_agent.en.md @@ -1,10 +1,9 @@ -You already have the Datadog Agent installed to collect and route your logs to [Datadog Log Management][2001]. If you do not have Datadog Agents set up, see the [Datadog Agent documentation][2002] for more information. +You already have the Datadog Agent installed to collect and route your logs or metrics to Datadog. If you do not have Datadog Agents set up, see the [Datadog Agent documentation][2002] for more information. You have the following information available: - A Datadog API key with [Remote Configuration enabled][2003]. - Your Datadog [`Site URL`][2004]. For example, `datadoghq.com` for the site `US1`.
-[2001]: /logs/ [2002]: /agent/ [2003]: /agent/remote_config/?tab=configurationyamlfile#setup [2004]: /getting_started/site/ diff --git a/layouts/shortcodes/observability_pipelines/processors/filter_syntax_logs_metrics.en.md b/layouts/shortcodes/observability_pipelines/processors/filter_syntax_logs_metrics.en.md index 3da03fdeeb8..684e71c3109 100644 --- a/layouts/shortcodes/observability_pipelines/processors/filter_syntax_logs_metrics.en.md +++ b/layouts/shortcodes/observability_pipelines/processors/filter_syntax_logs_metrics.en.md @@ -1,5 +1,3 @@ -#### Filter query syntax - Each processor has a corresponding filter query in its fields. Processors only process logs or metrics that match their filter query. For all processors except the Filter processor, logs or metrics that do not match the query are sent to the next step of the pipeline. For the Filter processor, logs or metrics that do not match the query are dropped. The following are logs filter query examples: diff --git a/layouts/shortcodes/observability_pipelines/processors/filter_syntax_metrics.en.md b/layouts/shortcodes/observability_pipelines/processors/filter_syntax_metrics.en.md index 8620f006f60..4d33d224cd1 100644 --- a/layouts/shortcodes/observability_pipelines/processors/filter_syntax_metrics.en.md +++ b/layouts/shortcodes/observability_pipelines/processors/filter_syntax_metrics.en.md @@ -1,5 +1,3 @@ -#### Filter query syntax - Each processor has a corresponding filter query in its fields. Processors only process metrics that match their filter query. For all processors except the Filter processor, metrics that do not match the query are sent to the next step of the pipeline. For the Filter processor, metrics that do not match the query are dropped. The following are metrics filter query examples: diff --git a/static/images/observability_pipelines/destinations/datadog_metrics_settings.png b/static/images/observability_pipelines/destinations/datadog_metrics_settings.png new file mode 100644 index 00000000000..f07a5b34da7 Binary files /dev/null and b/static/images/observability_pipelines/destinations/datadog_metrics_settings.png differ diff --git a/static/images/observability_pipelines/processors/tag_control_settings.png b/static/images/observability_pipelines/processors/tag_control_settings.png new file mode 100644 index 00000000000..c297458d167 Binary files /dev/null and b/static/images/observability_pipelines/processors/tag_control_settings.png differ diff --git a/static/images/observability_pipelines/setup/another_processor_group_metrics.png b/static/images/observability_pipelines/setup/another_processor_group_metrics.png new file mode 100644 index 00000000000..4592c4cd5f7 Binary files /dev/null and b/static/images/observability_pipelines/setup/another_processor_group_metrics.png differ