diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml new file mode 100644 index 00000000..bdf172c0 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -0,0 +1,5 @@ +blank_issues_enabled: false +contact_links: + - name: Questions and Answers + url: https://github.com/burningalchemist/sql_exporter/discussions + about: Please ask your questions on installation and usage here. diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 36d55db1..eaab1326 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -10,16 +10,18 @@ on: jobs: build: + if: ${{ !startsWith(github.event.head_commit.message, 'docs:') }} name: Build runs-on: ubuntu-latest steps: - name: Set up Go 1.x - uses: actions/setup-go@v2 + uses: actions/setup-go@v5 with: - go-version: ^1.18 + go-version: ^1.24 + check-latest: true id: go - name: Check out code into the Go module directory - uses: actions/checkout@v2 + uses: actions/checkout@v4 - name: Style run: make style - name: Vet diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 0a844888..04a838ba 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -11,6 +11,9 @@ on: jobs: analyze: + if: | + ${{ !startsWith(github.event.head_commit.message, 'docs:') }} || + ${{ !startsWith(github.event.head_commit.message, 'build:') }} name: Analyze runs-on: ubuntu-latest @@ -25,27 +28,27 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v2 + uses: actions/checkout@v4 with: # We must fetch at least the immediate parents so that if this is # a pull request then we can checkout the head. fetch-depth: 2 - name: Set up Go - uses: actions/setup-go@v2 + uses: actions/setup-go@v5 with: - go-version: ^1.18 + go-version: ^1.20 # Initializes the CodeQL tools for scanning. 
- name: Initialize CodeQL - uses: github/codeql-action/init@v2 + uses: github/codeql-action/init@v3 with: languages: ${{ matrix.language }} # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@v2 + uses: github/codeql-action/autobuild@v3 # â„šī¸ Command-line programs to run using the OS shell. # 📚 https://git.io/JvXDl @@ -59,4 +62,4 @@ jobs: # make release - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@v2 + uses: github/codeql-action/analyze@v3 diff --git a/.github/workflows/helm-workflow.yaml b/.github/workflows/helm-workflow.yaml new file mode 100644 index 00000000..22fcd14a --- /dev/null +++ b/.github/workflows/helm-workflow.yaml @@ -0,0 +1,115 @@ +name: Helm +on: + push: + branches: + - master + pull_request: + branches: + - master +env: + HELM_VERSION: 3.12.1 + PYTHON_VERSION: 3.9 + TARGET_BRANCH: chart-testing-target-branch + TARGET_REMOTE: test +jobs: + helm-jobs: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + fetch-depth: 0 + - name: Set up Helm + uses: azure/setup-helm@v4 + with: + version: "v${{ env.HELM_VERSION }}" + - uses: actions/setup-python@v5 + with: + python-version: ${{ env.PYTHON_VERSION }} + check-latest: true + # --------------------------------------------------------------- + # -- Instead of comparing to the master branch, I'm getting + # -- the commit hash set in the previous step from a + # -- currently released chart. If it doesn't exists, then + # -- I assume that chart is not released and compare to the + # -- previous commit + # -- + # -- Also, I'm setting the RepoURL here. Since we plan to support + # -- the official chart in this git repository, the helm + # -- repository is expected to belong to this repo as well. 
+ # --------------------------------------------------------------- + - name: Retrieve the latest commit sha from the helm chart + run: | + HELM_REPO_URL="https://${GITHUB_REPOSITORY_OWNER}.github.io/${GITHUB_REPOSITORY#*/}" + if helm repo add sql-exporter $HELM_REPO_URL + then + helm repo update + echo "TARGET_COMMIT=$(helm show chart sql-exporter/sql-exporter | yq '.annotations.git/commit-sha')" >> "${GITHUB_ENV}" + else + echo "TARGET_COMMIT=$(git show HEAD^1 --pretty=format:%H --no-patch)" >> "${GITHUB_ENV}" + fi + # --------------------------------------------------------------- + # -- As I could find CT doesn't support testing against commits + # -- directly, so I'm creating a new fake remote from a commit + # -- and testing the chart against it. This workaround doesn't + # -- support maintainers validation, but we have it disabled + # -- anyway + # --------------------------------------------------------------- + - name: Prepare a dummy remote to test the chart + run: | + DUMMY_REMOTE=$(mktemp -d) + git init "${DUMMY_REMOTE}" + git remote add "${TARGET_REMOTE}" "${DUMMY_REMOTE}" + git checkout -b "${TARGET_BRANCH}" "${TARGET_COMMIT}" + git push --set-upstream "${TARGET_REMOTE}" "${TARGET_BRANCH}" + git checkout "${GITHUB_SHA}" + - name: Set up chart-testing + uses: helm/chart-testing-action@v2.6.0 + - name: Run chart-testing (list-changed) + id: list-changed + run: | + changed=$(ct list-changed --chart-dirs . --target-branch "${TARGET_BRANCH}" --remote "${TARGET_REMOTE}") + if [[ -n "$changed" ]]; then + echo "changed=true" >> "$GITHUB_OUTPUT" + fi + - name: Run chart-testing (lint) + if: steps.list-changed.outputs.changed == 'true' + run: ct lint --target-branch "${TARGET_BRANCH}" --remote "${TARGET_REMOTE}" --validate-maintainers=false --chart-dirs . 
+ - name: Setup helmfile + if: steps.list-changed.outputs.changed == 'true' + uses: mamezou-tech/setup-helmfile@v2.0.0 + - name: Create kind cluster + if: steps.list-changed.outputs.changed == 'true' + uses: helm/kind-action@v1.9.0 + - name: Init postgres server + if: steps.list-changed.outputs.changed == 'true' + run: | + helmfile -f helm/ci/helmfile.yaml sync + - name: Run chart-testing (install) + if: steps.list-changed.outputs.changed == 'true' + run: ct install --target-branch "${TARGET_BRANCH}" --remote "${TARGET_REMOTE}" --chart-dirs . + - name: Run chart-testing (upgrade) + if: steps.list-changed.outputs.changed == 'true' + run: ct install --target-branch "${TARGET_BRANCH}" --remote "${TARGET_REMOTE}" --chart-dirs . --upgrade + - name: Configure Git + run: | + git config user.name "$GITHUB_ACTOR" + git config user.email "$GITHUB_ACTOR@users.noreply.github.com" + # --------------------------------------------------------------- + # -- On each run we're setting an annotation with the current + # -- commit hash, so in case it's released, we will see it + # -- running `$ helm show sql-exporter/sql-exporter` + # --------------------------------------------------------------- + - name: Set the git sha annotations in the helm chart + run: yq -i ".annotations.git/commit-sha = \"${GITHUB_SHA}\"" ./helm/Chart.yaml + + - name: Release charts + if: ${{ github.event.repository.default_branch && github.event_name == 'push' }} + uses: helm/chart-releaser-action@main + with: + charts_dir: . 
+ mark_as_latest: false + packages_with_index: true + env: + CR_TOKEN: "${{ secrets.GITHUB_TOKEN }}" + CR_RELEASE_NAME_TEMPLATE: "chart-{{ .Name }}-{{ .Version }}" diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index adc95ef7..978ced9a 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -14,11 +14,12 @@ jobs: VERSION: ${{ github.ref_name }} steps: - name: Set up Go 1.x - uses: actions/setup-go@v2 + uses: actions/setup-go@v5 with: - go-version: ^1.18 + go-version: ^1.24 + check-latest: true - name: Check out code into the Go module directory - uses: actions/checkout@v2 + uses: actions/checkout@v4 - name: Promu - Crossbuild run: make crossbuild @@ -43,7 +44,52 @@ jobs: run: make crossbuild-checksum - name: Upload artifacts - uses: softprops/action-gh-release@v1 + uses: softprops/action-gh-release@v2.2.2 with: files: | .tarballs/* + docker: + name: Push Docker image to Docker Hub + runs-on: ubuntu-latest + needs: build + steps: + - name: Check out the repo + uses: actions/checkout@v4 + + - name: Get Go version from the '.promu.yml' config + id: promu-go-version + run: printf "version=%s" "$(yq '.go.version' .promu.yml)" >> $GITHUB_OUTPUT + + - name: Set up QEMU + uses: docker/setup-qemu-action@v3 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Log in to Docker Hub + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKER_USERNAME }} + password: ${{ secrets.DOCKER_TOKEN }} + + - name: Extract metadata (tags, labels) for Docker + id: meta + uses: docker/metadata-action@v5 + with: + images: burningalchemist/sql_exporter + tags: | + type=semver,pattern={{version}} + type=semver,pattern={{major}}.{{minor}} + + - name: Build and push Docker image + uses: docker/build-push-action@v5 + with: + context: . 
+ file: Dockerfile.multi-arch + push: true + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} + platforms: linux/amd64,linux/arm64 + provenance: false + build-args: | + GOVERSION=${{ steps.promu-go-version.outputs.version }} diff --git a/.gitignore b/.gitignore index da62d065..b63e95af 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,8 @@ /.project /.settings +.idea/ +/.vscode /sql_exporter /sql_exporter.yml +test_configs/ +.idea/* diff --git a/.promu.yml b/.promu.yml index c5b79c9d..bf325a62 100644 --- a/.promu.yml +++ b/.promu.yml @@ -1,6 +1,6 @@ go: cgo: false - version: 1.19 + version: 1.24 repository: path: github.com/burningalchemist/sql_exporter build: @@ -14,6 +14,8 @@ build: -X github.com/prometheus/common/version.Branch={{.Branch}} -X github.com/prometheus/common/version.BuildUser={{user}}@{{host}} -X github.com/prometheus/common/version.BuildDate={{date "20060102-15:04:05"}} + -s + -w tarball: files: - LICENSE @@ -27,3 +29,4 @@ crossbuild: - windows/amd64 - linux/armv7 - linux/arm64 + - darwin/arm64 diff --git a/Dockerfile.multi-arch b/Dockerfile.multi-arch new file mode 100644 index 00000000..601dcce7 --- /dev/null +++ b/Dockerfile.multi-arch @@ -0,0 +1,22 @@ +ARG GOVERSION=latest + +FROM --platform=$BUILDPLATFORM quay.io/prometheus/golang-builder:${GOVERSION}-main AS builder + +# Get sql_exporter +ADD . 
/go/src/github.com/burningalchemist/sql_exporter +WORKDIR /go/src/github.com/burningalchemist/sql_exporter + +# Do makefile +ARG TARGETOS +ARG TARGETARCH + +RUN GOOS=$TARGETOS GOARCH=$TARGETARCH make + +# Make image and copy build sql_exporter +FROM --platform=$TARGETPLATFORM quay.io/prometheus/busybox:latest +LABEL maintainer="The Prometheus Authors " +COPY --from=builder /go/src/github.com/burningalchemist/sql_exporter/sql_exporter /bin/sql_exporter + +EXPOSE 9399 +USER nobody +ENTRYPOINT [ "/bin/sql_exporter" ] diff --git a/LICENSE b/LICENSE index 89ad7d92..97f45ee8 100644 --- a/LICENSE +++ b/LICENSE @@ -1,5 +1,6 @@ MIT License +Copyright (c) 2020 Sergei Zyubin Copyright (c) 2017 Alin Sinpalean Permission is hereby granted, free of charge, to any person obtaining a copy diff --git a/Makefile b/Makefile index 1200287e..dfbb99e4 100644 --- a/Makefile +++ b/Makefile @@ -11,9 +11,18 @@ # See the License for the specific language governing permissions and # limitations under the License. +# To distinguish between native Windows and Windows Subsystem for Linux (WSL), +# we have to check how PATH is separated. For WSL and Unix-based systems it's +# a colon; for native Windows it's a semicolon. +ifeq '$(findstring ;,$(PATH))' ';' + GOPATH = $(firstword $(subst ;, ,$(shell $(GO) env GOPATH))) + PREFIX = $(shell cd) +endif + GO := go -GOPATH := $(firstword $(subst :, ,$(shell $(GO) env GOPATH))) +GOPATH ?= $(firstword $(subst :, ,$(shell $(GO) env GOPATH))) PROMU := $(GOPATH)/bin/promu +PROMU_VERSION := v0.17.0 pkgs = $(shell $(GO) list ./... | grep -v /vendor/) PREFIX ?= $(shell pwd) @@ -30,7 +39,7 @@ style: test: @echo ">> running tests" - @$(GO) test -short -race $(pkgs) + @$(GO) test -short $(pkgs) format: @echo ">> formatting code" @@ -73,10 +82,17 @@ docker: @echo ">> building docker image" @docker build -t "$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" . +# Override for native Windows, where the path separator is a semicolon. 
+ifeq '$(findstring ;,$(PATH))' ';' +promu: + @set GOOS=windows + @set GOARCH=$(subst AMD64,amd64,$(patsubst i%86,386,$(shell echo %PROCESSOR_ARCHITECTURE%))) + @$(GO) install github.com/prometheus/promu@$(PROMU_VERSION) +else promu: @GOOS=$(shell uname -s | tr A-Z a-z) \ GOARCH=$(subst x86_64,amd64,$(patsubst i%86,386,$(shell uname -m))) \ - $(GO) install github.com/prometheus/promu@v0.13.0 - + $(GO) install github.com/prometheus/promu@$(PROMU_VERSION) +endif .PHONY: all style format build test vet tarball docker promu diff --git a/README.md b/README.md index 3abde153..32e51da5 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,5 @@ -# Prometheus SQL Exporter [![Go](https://github.com/burningalchemist/sql_exporter/workflows/Go/badge.svg)](https://github.com/burningalchemist/sql_exporter/actions?query=workflow%3AGo) [![Go Report Card](https://goreportcard.com/badge/github.com/burningalchemist/sql_exporter)](https://goreportcard.com/report/github.com/burningalchemist/sql_exporter) [![Docker Pulls](https://img.shields.io/docker/pulls/burningalchemist/sql_exporter)](https://hub.docker.com/r/burningalchemist/sql_exporter) ![Downloads](https://img.shields.io/github/downloads/burningalchemist/sql_exporter/total) - -This is a permanent fork of Database agnostic SQL exporter for [Prometheus](https://prometheus.io) created by [@free](https://github.com/free/sql_exporter). 
+# SQL Exporter for Prometheus +[![Go](https://github.com/burningalchemist/sql_exporter/workflows/Go/badge.svg)](https://github.com/burningalchemist/sql_exporter/actions?query=workflow%3AGo) [![Go Report Card](https://goreportcard.com/badge/github.com/burningalchemist/sql_exporter)](https://goreportcard.com/report/github.com/burningalchemist/sql_exporter) [![Docker Pulls](https://img.shields.io/docker/pulls/burningalchemist/sql_exporter)](https://hub.docker.com/r/burningalchemist/sql_exporter) ![Downloads](https://img.shields.io/github/downloads/burningalchemist/sql_exporter/total) [![Artifact HUB](https://img.shields.io/endpoint?url=https://artifacthub.io/badge/repository/sql-exporter)](https://artifacthub.io/packages/helm/sql-exporter/sql-exporter) ## Overview @@ -10,6 +9,7 @@ monitoring system. Out of the box, it provides support for the following databas - MySQL - PostgreSQL - Microsoft SQL Server +- Oracle Database - Clickhouse - Snowflake - Vertica @@ -38,7 +38,9 @@ Use the `-help` flag to get help information. $ ./sql_exporter -help Usage of ./sql_exporter: -config.file string - SQL Exporter configuration file name. (default "sql_exporter.yml") + SQL Exporter configuration file path. (default "sql_exporter.yml") + -config.check + Check configuration and exit. -web.listen-address string Address to listen on for web interface and telemetry. (default ":9399") -web.metrics-path string @@ -59,7 +61,9 @@ By default we produce a binary with all the supported drivers with the following make build ``` -It's also possible to reduce the size of the binary by only including specific set of drivers like Postgres, MySQL and MSSQL. In this case we need to update `drivers.go`. To avoid manual manipulation there is a helper code generator available, so we can run the following commands: +It's also possible to reduce the size of the binary by only including specific set of drivers like Postgres, MySQL and +MSSQL. In this case we need to update `drivers.go`. 
To avoid manual manipulation there is a helper code generator +available, so we can run the following commands: ```shell make drivers-minimal @@ -70,31 +74,9 @@ The first command will regenerate `drivers.go` file with a minimal set of import Running `make drivers-all` will regenerate driver set back to the current defaults. -Feel free to revisit and add more drivers as required. There's also the `custom` list that allows managing a separate list of drivers for special needs. - -## Run as a Windows service - -If you run SQL Exporter from Windows, it might come in handy to register it as a service to avoid interactive sessions. -It is **important** to define `-config.file` parameter to load the configuration file. The other settings can be added -as well. The registration itself is performed with Powershell or CMD (make sure you run them as Administrator): - -Powershell: - -```powershell -New-Service -name "SqlExporterSvc" ` --BinaryPathName "%SQL_EXPORTER_PATH%\sql_exporter.exe -config.file %SQL_EXPORTER_PATH%\sql_exporter.yml" ` --StartupType Automatic ` --DisplayName "Prometheus SQL Exporter" -``` - -CMD: +Feel free to revisit and add more drivers as required. There's also the `custom` list that allows managing a separate +list of drivers for special needs. -```shell -sc.exe create SqlExporterSvc binPath= "%SQL_EXPORTER_PATH%\sql_exporter.exe -config.file %SQL_EXPORTER_PATH%\sql_exporter.yml" start= auto -``` - -`%SQL_EXPORTER_PATH%` is a path to the SQL Exporter binary executable. This document assumes that configuration files -are in the same location. ## Configuration @@ -132,18 +114,31 @@ global: # The target to monitor and the list of collectors to execute on it. target: + # Target name (optional). Setting this field enables extra metrics e.g. `up` and `scrape_duration` with + # the `target` label that are always returned on a scrape. + name: "prices_db" # Data source name always has a URI schema that matches the driver name. In some cases (e.g. 
MySQL) # the schema gets dropped or replaced to match the driver expected DSN format. data_source_name: 'sqlserver://prom_user:prom_password@dbserver1.example.com:1433' # Collectors (referenced by name) to execute on the target. - collectors: [pricing_data_freshness] + # Glob patterns are supported (see for syntax). + collectors: [pricing_data_freshness, pricing_*] + + # In case you need to connect to a backend that only responds to a limited set of commands (e.g. pgbouncer) or + # a data warehouse you don't want to keep online all the time (due to the extra cost), you might want to disable `ping` + # enable_ping: true # Collector definition files. +# Glob patterns are supported (see for syntax). collector_files: - "*.collector.yml" ``` +> [!NOTE] +> The `collectors` and `collector_files` configurations support [Glob pattern matching](https://pkg.go.dev/path/filepath#Match). +To match names with literal pattern terms in them, e.g. `collector_*1*`, these must be escaped: `collector_\*1\*`. + ### Collectors Collectors may be defined inline, in the exporter configuration file, under `collectors`, or they may be defined in @@ -169,31 +164,244 @@ metrics: # Arbitrary key/value pair portfolio: income values: [LastUpdateTime] + # Static metric value (optional). Useful in case we are interested in string data (key_labels) only. It's mutually + # exclusive with `values` field. + # static_value: 1 + # Timestamp value (optional). Should point at the existing column containing valid timestamps to return a metric + # with an explicit timestamp. + # timestamp_value: CreatedAt query: | SELECT Market, max(UpdateTime) AS LastUpdateTime FROM MarketPrices GROUP BY Market ``` -### Data Source Names +### Data Source Names (DSN) To keep things simple and yet allow fully configurable database connections, SQL Exporter uses DSNs (like `sqlserver://prom_user:prom_password@dbserver1.example.com:1433`) to refer to database instances. 
---- - -**UPDATE:** Since v0.9.0 `sql_exporter` relies on `github.com/xo/dburl` package for parsing Data Source Names (DSN). -This can potentially affect your connection to certain databases like MySQL, so you might want to adjust your connection -string accordingly: +This exporter relies on `xo/dburl` package for parsing Data Source Names (DSN). The goal is to have a +unified way to specify DSNs across all supported databases. This can potentially affect your connection to certain +databases like MySQL, so you might want to adjust your connection string accordingly: ```plaintext mysql://user:pass@localhost/dbname - for TCP connection mysql:/var/run/mysqld/mysqld.sock - for Unix socket connection ``` +> [!IMPORTANT] +> If your DSN contains special characters in any part of your connection string (including passwords), you might need to +apply [URL encoding](https://en.wikipedia.org/wiki/URL_encoding#Reserved_characters) (percent-encoding) to them. +For example, `p@$$w0rd#abc` then becomes `p%40%24%24w0rd%23abc`. + For additional details please refer to [xo/dburl](https://github.com/xo/dburl) documentation. -## TLS and Basic Authentication + +## Miscellaneous + +
+Handling NULL values + +Queries that return `NULL` values are supported, but they are not rendered as metrics. It's useful for situations, when +the result set depends on some conditions, so it may be empty. Whenever a query returns `NULL` values, the exporter +logs a message at the `Debug` level. If your query constantly returns `NULL` values, it most likely means that you need +to revisit your query logic. +
+ +
+Multiple database connections + +It is possible to run a single exporter instance against multiple database connections. In this case we need to +configure `jobs` list instead of the `target` section as in the following example: + +```yaml +jobs: + - job_name: db_targets + collectors: [pricing_data_freshness, pricing_*] + enable_ping: true # Optional, true by default. Set to `false` in case you connect to pgbouncer or a data warehouse + static_configs: + - targets: + pg1: 'pg://db1@127.0.0.1:25432/postgres?sslmode=disable' + pg2: 'postgresql://username:password@pg-host.example.com:5432/dbname?sslmode=disable' + labels: # Optional, arbitrary key/value pair for all targets + cluster: cluster1 +``` + +, where DSN strings are assigned to the arbitrary instance names (i.e. pg1 and pg2). + +We can also define multiple jobs to run different collectors against different target sets. + +Since v0.14, sql_exporter can be passed an optional list of job names to filter out metrics. The `jobs[]` query +parameter may be used multiple times. In Prometheus configuration we can use this syntax under the [scrape +config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#%3Cscrape_config%3E): + +```yaml + params: + jobs[]: + - db_targets1 + - db_targets2 +``` + +This might be useful for scraping targets with different intervals or any other advanced use cases, when calling all +jobs at once is undesired. + +
+ +
+Scraping PgBouncer, ProxySQL, Clickhouse or Snowflake + +Given that PgBouncer is a connection pooler, it doesn't support all the commands that a regular SQL database does, so +we need to make some adjustments to the configuration: + +- add `enable_ping: false` to the metric/job configuration as PgBouncer doesn't support the ping command; +- add `no_prepared_statement: true` to the metric/job configuration as PgBouncer doesn't support the extended query protocol; + +For libpq (postgres) driver we only need to set `no_prepared_statement: true` parameter. For pgx driver, we also need to +add `default_query_exec_mode=simple_protocol` parameter to the DSN (for v5). + +Below is an example of a metric configuration for PgBouncer: +```yaml + metrics: + - metric_name: max_connections + no_prepared_statement: true + type: gauge + values: [max_connections] + key_labels: + - name + - database + - force_user + - pool_mode + - disabled + - paused + - current_connections + - reserve_pool + - min_pool_size + - pool_size + - port + query: | + SHOW DATABASES; + +``` + +Same goes for ProxySQL and Clickhouse, where we need to add `no_prepared_statement: true` to the metric/job +configuration, as these databases doesn't support prepared statements. + +In case, you connect to a data warehouse (e.g. Snowflake) you don't want to keep online all the time (due to the extra +cost), you might want to disable `ping` by setting `enable_ping: false`. +
+ +
+Scraping timestamp value from the result set + +Some database drivers by default return DATE or DATETIME values as String type, whereas sql_exporter expects it to be Time. + +This may result in the following error: +``` +unsupported Scan, storing driver.Value type []uint8 into type *time.Time +``` + +To resolve the issue, make sure to include `parseTime=true` as a parameter on the DSN, so values with TIMESTAMP, DATETIME, TIME, DATE types +will end up as `time.Time` type, which is a requirement on the sql_exporter side to process the value correctly. +
+ +
+Using AWS Secrets Manager + +If the database runs on AWS EC2 instance, this is a secure option to store the DSN without having it in +the configuration file. To use this option: + +- Create a [secret](https://docs.aws.amazon.com/secretsmanager/latest/userguide/manage_create-basic-secret.html) in + key/value pairs format, specify Key `data_source_name` and then for Value enter the DSN value. + For the secret name, enter a name for your secret, and pass that name in the configuration file as a value for + `aws_secret_name` item under `target`. Secret json example: + +```json +{ + "data_source_name": "sqlserver://prom_user:prom_password@dbserver1.example.com:1433" +} +``` + +- Configuration file example: + +```yaml +... +target: + aws_secret_name: '' +... +``` + +- Allow read-only access from EC2 IAM role to the secret by attaching a [resource-based +policy](https://docs.aws.amazon.com/secretsmanager/latest/userguide/auth-and-access_resource-based-policies.html) to +the secret. Policy example: + +```json +{ + "Version" : "2012-10-17", + "Statement" : [ + { + "Effect": "Allow", + "Principal": {"AWS": "arn:aws:iam::123456789012:role/EC2RoleToAccessSecrets"}, + "Action": "secretsmanager:GetSecretValue", + "Resource": "*", + } + ] +} +``` + +Currently, AWS Secret Manager integration is only available for a single target configuration. + +
+ +
+Run as a Windows service + +If you run SQL Exporter from Windows, it might come in handy to register it as a service to avoid interactive sessions. +It is **important** to define `--config.file` parameter to load the configuration file. The other settings can be added +as well. The registration itself is performed with Powershell or CMD (make sure you run it as Administrator): + +Powershell: + +```powershell +New-Service -name "SqlExporterSvc" ` +-BinaryPathName "%SQL_EXPORTER_PATH%\sql_exporter.exe --config.file %SQL_EXPORTER_PATH%\sql_exporter.yml" ` +-StartupType Automatic ` +-DisplayName "Prometheus SQL Exporter" +``` + +CMD: + +```shell +sc.exe create SqlExporterSvc binPath= "%SQL_EXPORTER_PATH%\sql_exporter.exe --config.file %SQL_EXPORTER_PATH%\sql_exporter.yml" start= auto +``` + +`%SQL_EXPORTER_PATH%` is a path to the SQL Exporter binary executable. This document assumes that configuration files +are in the same location. + +In case you need a more sophisticated setup (e.g. with logging, environment variables, etc), you might want to use [NSSM](https://nssm.cc/) or +[WinSW](https://github.com/winsw/winsw). Please consult their documentation for more details. + +
+ +
+Using WinSSPI/NTLM as the authentication mechanism for MSSQL + +If sql_exporter is running in the same Windows domain as the MSSQL, then you can use the parameter `authenticator=winsspi` within the connection string to authenticate without any additional credentials: + +``` +sqlserver://@:?authenticator=winsspi +``` + +If you want to use Windows credentials to authenticate instead of MSSQL credentials, you can use the parameter `authenticator=ntlm` within the connection string. The USERNAME and PASSWORD then corresponds +to a Windows username and password. The Windows domain may need to be prefixed to the username with a trailing `\`: + +``` +sqlserver://:@:?authenticator=ntlm +``` +
+ +
+TLS and Basic Authentication SQL Exporter supports TLS and Basic Authentication. This enables better control of the various HTTP endpoints. @@ -201,6 +409,13 @@ To use TLS and/or Basic Authentication, you need to pass a configuration file us The format of the file is described in the [exporter-toolkit](https://github.com/prometheus/exporter-toolkit/blob/master/docs/web-configuration.md) repository. +
+ +If you have an issue using sql_exporter, please check [Discussions](https://github.com/burningalchemist/sql_exporter/discussions) or +closed [Issues](https://github.com/burningalchemist/sql_exporter/issues?q=is%3Aissue+is%3Aclosed) first. Chances are +someone else has already encountered the same problem and there is a solution. If not, feel free to create a new +discussion. + ## Why It Exists SQL Exporter started off as an exporter for Microsoft SQL Server, for which no reliable exporters exist. But what is @@ -221,3 +436,8 @@ philosophical issue, but practical issues are not all that difficult to imagine: The control they provide over which labels get applied is limited, and the base label set spammy. And finally, configurations are not easily reused without copy-pasting and editing across jobs and instances. + +## Credits + +This is a permanent fork of Database agnostic SQL exporter for [Prometheus](https://prometheus.io) created by +[@free](https://github.com/free/sql_exporter). diff --git a/VERSION b/VERSION index f374f666..249afd51 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -0.9.1 +0.18.1 diff --git a/cmd/sql_exporter/log.go b/cmd/sql_exporter/log.go new file mode 100644 index 00000000..3cdbe39b --- /dev/null +++ b/cmd/sql_exporter/log.go @@ -0,0 +1,60 @@ +package main + +import ( + "fmt" + "log/slog" + "os" + + "github.com/prometheus/common/promslog" +) + +type logConfig struct { + logger *slog.Logger + logFileHandler *os.File +} + +// initLogFile opens the log file for writing if a log file is specified. +func initLogFile(logFile string) (*os.File, error) { + if logFile == "" { + return nil, nil + } + logFileHandler, err := os.OpenFile(logFile, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0o644) + if err != nil { + return nil, fmt.Errorf("error opening log file: %w", err) + } + return logFileHandler, nil +} + +// initLogConfig configures and initializes the logging system. 
+func initLogConfig(logLevel, logFormat string, logFile string) (*logConfig, error) { + logFileHandler, err := initLogFile(logFile) + if err != nil { + return nil, err + } + + if logFileHandler == nil { + logFileHandler = os.Stderr + } + + promslogConfig := &promslog.Config{ + Level: promslog.NewLevel(), + Format: promslog.NewFormat(), + Style: promslog.SlogStyle, + Writer: logFileHandler, + } + + if err := promslogConfig.Level.Set(logLevel); err != nil { + return nil, err + } + + if err := promslogConfig.Format.Set(logFormat); err != nil { + return nil, err + } + // Initialize logger. + logger := promslog.New(promslogConfig) + + return &logConfig{ + logger: logger, + logFileHandler: logFileHandler, + }, nil +} diff --git a/cmd/sql_exporter/main.go b/cmd/sql_exporter/main.go index 922dcb5a..5594548d 100644 --- a/cmd/sql_exporter/main.go +++ b/cmd/sql_exporter/main.go @@ -3,25 +3,29 @@ package main import ( "flag" "fmt" + "log/slog" "net/http" "os" + "os/signal" "runtime" + "syscall" "time" "github.com/burningalchemist/sql_exporter" + cfg "github.com/burningalchemist/sql_exporter/config" _ "github.com/kardianos/minwinsvc" "github.com/prometheus/client_golang/prometheus" + info "github.com/prometheus/client_golang/prometheus/collectors/version" "github.com/prometheus/client_golang/prometheus/promhttp" - "github.com/prometheus/common/promlog" + "github.com/prometheus/common/model" "github.com/prometheus/common/version" "github.com/prometheus/exporter-toolkit/web" - "k8s.io/klog/v2" ) const ( - envConfigFile = "SQLEXPORTER_CONFIG" - envDebug = "SQLEXPORTER_DEBUG" - httpReadHeaderTimeout = time.Duration(time.Second * 60) + appName string = "sql_exporter" + + httpReadHeaderTimeout time.Duration = time.Duration(time.Second * 60) ) var ( @@ -31,57 +35,77 @@ var ( enableReload = flag.Bool("web.enable-reload", false, "Enable reload collector data handler") webConfigFile = flag.String("web.config.file", "", "[EXPERIMENTAL] TLS/BasicAuth configuration file path") configFile = 
flag.String("config.file", "sql_exporter.yml", "SQL Exporter configuration file path") - logFormatJSON = flag.Bool("log.json", false, "Set log output format to JSON") + configCheck = flag.Bool("config.check", false, "Check configuration and exit") + logFormat = flag.String("log.format", "logfmt", "Set log output format") logLevel = flag.String("log.level", "info", "Set log level") + logFile = flag.String("log.file", "", "Log file to write to, leave empty to write to stderr") ) func init() { - prometheus.MustRegister(version.NewCollector("sql_exporter")) + prometheus.MustRegister(info.NewCollector("sql_exporter")) + flag.BoolVar(&cfg.EnablePing, "config.enable-ping", true, "Enable ping for targets") + flag.BoolVar(&cfg.IgnoreMissingVals, "config.ignore-missing-values", false, "[EXPERIMENTAL] Ignore results with missing values for the requested columns") + flag.StringVar(&cfg.DsnOverride, "config.data-source-name", "", "Data source name to override the value in the configuration file with") + flag.StringVar(&cfg.TargetLabel, "config.target-label", "target", "Target label name") } func main() { - if os.Getenv(envDebug) != "" { + if os.Getenv(cfg.EnvDebug) != "" { runtime.SetBlockProfileRate(1) runtime.SetMutexProfileFraction(1) } flag.Parse() - promlogConfig := &promlog.Config{} - promlogConfig.Level = &promlog.AllowedLevel{} - _ = promlogConfig.Level.Set(*logLevel) - if *logFormatJSON { - promlogConfig.Format = &promlog.AllowedFormat{} - _ = promlogConfig.Format.Set("json") + // Show version and exit. + if *showVersion { + fmt.Println(version.Print(appName)) + os.Exit(0) } - // Overriding the default klog with our go-kit klog implementation. - // Thus we need to pass it our go-kit logger object. - logger := promlog.New(promlogConfig) - klog.SetLogger(logger) - - // Override --alsologtostderr default value. 
- if alsoLogToStderr := flag.Lookup("alsologtostderr"); alsoLogToStderr != nil { - alsoLogToStderr.DefValue = "true" - _ = alsoLogToStderr.Value.Set("true") + // Setup logging. + logConfig, err := initLogConfig(*logLevel, *logFormat, *logFile) + if err != nil { + fmt.Printf("Error initializing exporter: %s\n", err) + os.Exit(1) } + + defer func() { + if logConfig.logFileHandler != nil { + logConfig.logFileHandler.Close() + } + }() + + slog.SetDefault(logConfig.logger) + // Override the config.file default with the SQLEXPORTER_CONFIG environment variable if set. - if val, ok := os.LookupEnv(envConfigFile); ok { + if val, ok := os.LookupEnv(cfg.EnvConfigFile); ok { *configFile = val } - if *showVersion { - fmt.Println(version.Print("sql_exporter")) + if *configCheck { + slog.Info("Checking configuration file", "configFile", *configFile) + if _, err := cfg.Load(*configFile); err != nil { + slog.Error("Configuration check failed", "error", err) + os.Exit(1) + } + slog.Info("Configuration check successful") os.Exit(0) } - klog.Warningf("Starting SQL exporter %s %s", version.Info(), version.BuildContext()) - + slog.Warn("Starting SQL exporter", "versionInfo", version.Info(), "buildContext", version.BuildContext()) exporter, err := sql_exporter.NewExporter(*configFile) if err != nil { - klog.Fatalf("Error creating exporter: %s", err) + slog.Error("Error creating exporter", "error", err) + os.Exit(1) } + // Start the scrape_errors_total metric drop ticker if configured. + startScrapeErrorsDropTicker(exporter, exporter.Config().Globals.ScrapeErrorDropInterval) + + // Start signal handler to reload collector and target data. + signalHandler(exporter, *configFile) + // Setup and start webserver. 
http.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) { http.Error(w, "OK", http.StatusOK) }) http.HandleFunc("/", HomeHandlerFunc(*metricsPath)) @@ -89,45 +113,59 @@ func main() { http.Handle(*metricsPath, promhttp.InstrumentMetricHandler(prometheus.DefaultRegisterer, ExporterHandlerFor(exporter))) // Expose exporter metrics separately, for debugging purposes. http.Handle("/sql_exporter_metrics", promhttp.HandlerFor(prometheus.DefaultGatherer, promhttp.HandlerOpts{})) - - // Expose refresh handler to reload query collections + // Expose refresh handler to reload collectors and targets if *enableReload { - http.HandleFunc("/reload", reloadCollectors(exporter)) + http.HandleFunc("/reload", reloadHandler(exporter, *configFile)) } - klog.Warning("Listening on ", *listenAddress) server := &http.Server{Addr: *listenAddress, ReadHeaderTimeout: httpReadHeaderTimeout} - if err := web.ListenAndServe(server, *webConfigFile, logger); err != nil { - klog.Fatal(err) + if err := web.ListenAndServe(server, &web.FlagConfig{ + WebListenAddresses: &([]string{*listenAddress}), + WebConfigFile: webConfigFile, WebSystemdSocket: OfBool(false), + }, logConfig.logger); err != nil { + slog.Error("Error starting web server", "error", err) + os.Exit(1) + } } -func reloadCollectors(e sql_exporter.Exporter) func(http.ResponseWriter, *http.Request) { +// reloadHandler returns a handler that reloads collector and target data. 
+func reloadHandler(e sql_exporter.Exporter, configFile string) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { - klog.Infof("Reloading the collectors...") - config := e.Config() - if err := config.ReloadCollectorFiles(); err != nil { - klog.Errorf("Error reloading collector configs - %v", err) + if err := sql_exporter.Reload(e, &configFile); err != nil { + slog.Error("Error reloading collector and target data", "error", err) http.Error(w, err.Error(), http.StatusInternalServerError) + return } + w.WriteHeader(http.StatusOK) + } +} - // FIXME: Should be t.Collectors() instead of config.Collectors - target, err := sql_exporter.NewTarget("", "", string(config.Target.DSN), config.Collectors, nil, config.Globals) - if err != nil { - klog.Errorf("Error creating a new target - %v", err) +// signalHandler listens for SIGHUP signals and reloads the collector and target data. +func signalHandler(e sql_exporter.Exporter, configFile string) { + c := make(chan os.Signal, 1) + signal.Notify(c, syscall.SIGHUP) + go func() { + for range c { + if err := sql_exporter.Reload(e, &configFile); err != nil { + slog.Error("Error reloading collector and target data", "error", err) + } } - e.UpdateTarget([]sql_exporter.Target{target}) - - klog.Infof("Query collectors have been successfully reloaded") - w.WriteHeader(http.StatusNoContent) - } + }() } -// LogFunc is an adapter to allow the use of any function as a promhttp.Logger. If f is a function, LogFunc(f) is a -// promhttp.Logger that calls f. -type LogFunc func(args ...interface{}) +// startScrapeErrorsDropTicker starts a ticker that periodically drops scrape error metrics. +func startScrapeErrorsDropTicker(exporter sql_exporter.Exporter, interval model.Duration) { + if interval <= 0 { + return + } -// Println implements promhttp.Logger. 
-func (log LogFunc) Println(args ...interface{}) { - log(args) + ticker := time.NewTicker(time.Duration(interval)) + slog.Warn("Started scrape_errors_total metrics drop ticker", "interval", interval) + go func() { + defer ticker.Stop() + for range ticker.C { + exporter.DropErrorMetrics() + } + }() } diff --git a/cmd/sql_exporter/promhttp.go b/cmd/sql_exporter/promhttp.go index 98c0cc66..4ffd9936 100644 --- a/cmd/sql_exporter/promhttp.go +++ b/cmd/sql_exporter/promhttp.go @@ -1,29 +1,31 @@ package main import ( - "bytes" - "compress/gzip" "context" "errors" - "fmt" "io" + "log/slog" "net/http" "strconv" - "strings" - "sync" "time" "github.com/burningalchemist/sql_exporter" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/expfmt" - "k8s.io/klog/v2" ) const ( - contentTypeHeader = "Content-Type" - contentLengthHeader = "Content-Length" - contentEncodingHeader = "Content-Encoding" - acceptEncodingHeader = "Accept-Encoding" + contentTypeHeader string = "Content-Type" + contentLengthHeader string = "Content-Length" + contentEncodingHeader string = "Content-Encoding" + acceptEncodingHeader string = "Accept-Encoding" + scrapeTimeoutHeader string = "X-Prometheus-Scrape-Timeout-Seconds" +) + +const ( + prometheusHeaderErr = "Failed to parse timeout from Prometheus header" + noMetricsGathered = "No metrics gathered" + noMetricsEncoded = "No metrics encoded" ) // ExporterHandlerFor returns an http.Handler for the provided Exporter. @@ -32,13 +34,30 @@ func ExporterHandlerFor(exporter sql_exporter.Exporter) http.Handler { ctx, cancel := contextFor(req, exporter) defer cancel() + // Parse the query params and set the job filters if any + jobFilters := req.URL.Query()["jobs[]"] + exporter.SetJobFilters(jobFilters) + // Go through prometheus.Gatherers to sanitize and sort metrics. 
- gatherer := prometheus.Gatherers{exporter.WithContext(ctx)} + gatherer := prometheus.Gatherers{exporter.WithContext(ctx), sql_exporter.SvcRegistry} mfs, err := gatherer.Gather() if err != nil { - klog.Infof("Error gathering metrics: %s", err) + switch t := err.(type) { + case prometheus.MultiError: + for _, err := range t { + if errors.Is(err, context.DeadlineExceeded) { + slog.Error("Timeout while collecting metrics", "error", err) + + } else { + slog.Error("Error gathering metrics", "error", err) + } + } + default: + slog.Error("Error gathering metrics", "error", err) + } if len(mfs) == 0 { - http.Error(w, "No metrics gathered, "+err.Error(), http.StatusInternalServerError) + slog.Error("No metrics gathered", "error", err) + http.Error(w, noMetricsGathered+", "+err.Error(), http.StatusInternalServerError) return } } @@ -52,19 +71,21 @@ func ExporterHandlerFor(exporter sql_exporter.Exporter) http.Handler { for _, mf := range mfs { if err := enc.Encode(mf); err != nil { errs = append(errs, err) - klog.Infof("Error encoding metric family %q: %s", mf.GetName(), err) + slog.Error("Error encoding metric family", "name", mf.GetName(), "error", err) + } } if closer, ok := writer.(io.Closer); ok { closer.Close() } if errs.MaybeUnwrap() != nil && buf.Len() == 0 { - http.Error(w, "No metrics encoded, "+errs.Error(), http.StatusInternalServerError) + slog.Error("No metrics encoded", "error", errs) + http.Error(w, noMetricsEncoded+", "+errs.Error(), http.StatusInternalServerError) return } header := w.Header() header.Set(contentTypeHeader, string(contentType)) - header.Set(contentLengthHeader, fmt.Sprint(buf.Len())) + header.Set(contentLengthHeader, strconv.Itoa(buf.Len())) if encoding != "" { header.Set(contentEncodingHeader, encoding) } @@ -76,19 +97,14 @@ func contextFor(req *http.Request, exporter sql_exporter.Exporter) (context.Cont timeout := time.Duration(0) configTimeout := time.Duration(exporter.Config().Globals.ScrapeTimeout) // If a timeout is provided in the 
Prometheus header, use it. - if v := req.Header.Get("X-Prometheus-Scrape-Timeout-Seconds"); v != "" { + if v := req.Header.Get(scrapeTimeoutHeader); v != "" { timeoutSeconds, err := strconv.ParseFloat(v, 64) if err != nil { - parseError := errors.Unwrap(err) switch { - case errors.Is(parseError, strconv.ErrSyntax): - { - klog.Errorf("Failed to parse timeout from Prometheus header: unsupported value") - } - case errors.Is(parseError, strconv.ErrRange): - { - klog.Errorf("Failed to parse timeout from Prometheus header: value is out of range") - } + case errors.Is(err, strconv.ErrSyntax): + slog.Error("Failed to parse timeout from Prometheus header", "error", err) + case errors.Is(err, strconv.ErrRange): + slog.Error(prometheusHeaderErr, "error", err) } } else { timeout = time.Duration(timeoutSeconds * float64(time.Second)) @@ -96,8 +112,7 @@ func contextFor(req *http.Request, exporter sql_exporter.Exporter) (context.Cont // Subtract the timeout offset, unless the result would be negative or zero. timeoutOffset := time.Duration(exporter.Config().Globals.TimeoutOffset) if timeoutOffset > timeout { - klog.Errorf("global.scrape_timeout_offset (`%s`) is greater than Prometheus' scraping timeout (`%s`), ignoring", - timeoutOffset, timeout) + slog.Error("global.scrape_timeout_offset is greater than Prometheus' scraping timeout, ignoring", "timeout", timeout, "timeoutOffset", timeoutOffset) } else { timeout -= timeoutOffset } @@ -114,33 +129,3 @@ func contextFor(req *http.Request, exporter sql_exporter.Exporter) (context.Cont } return context.WithTimeout(context.Background(), timeout) } - -var bufPool sync.Pool - -func getBuf() *bytes.Buffer { - buf := bufPool.Get() - if buf == nil { - return &bytes.Buffer{} - } - return buf.(*bytes.Buffer) -} - -func giveBuf(buf *bytes.Buffer) { - buf.Reset() - bufPool.Put(buf) -} - -// decorateWriter wraps a writer to handle gzip compression if requested. 
It -// returns the decorated writer and the appropriate "Content-Encoding" header -// (which is empty if no compression is enabled). -func decorateWriter(request *http.Request, writer io.Writer) (w io.Writer, encoding string) { - header := request.Header.Get(acceptEncodingHeader) - parts := strings.Split(header, ",") - for _, part := range parts { - part := strings.TrimSpace(part) - if part == "gzip" || strings.HasPrefix(part, "gzip;") { - return gzip.NewWriter(writer), "gzip" - } - } - return writer, "" -} diff --git a/cmd/sql_exporter/util.go b/cmd/sql_exporter/util.go new file mode 100644 index 00000000..bc83dfa6 --- /dev/null +++ b/cmd/sql_exporter/util.go @@ -0,0 +1,54 @@ +package main + +import ( + "bytes" + "compress/gzip" + "io" + "net/http" + "strings" + "sync" +) + +var bufPool sync.Pool + +func getBuf() *bytes.Buffer { + buf := bufPool.Get() + if buf == nil { + return &bytes.Buffer{} + } + return buf.(*bytes.Buffer) +} + +func giveBuf(buf *bytes.Buffer) { + buf.Reset() + bufPool.Put(buf) +} + +// decorateWriter wraps a writer to handle gzip compression if requested. It +// returns the decorated writer and the appropriate "Content-Encoding" header +// (which is empty if no compression is enabled). +func decorateWriter(request *http.Request, writer io.Writer) (w io.Writer, encoding string) { + header := request.Header.Get(acceptEncodingHeader) + parts := strings.Split(header, ",") + for _, part := range parts { + part := strings.TrimSpace(part) + if part == "gzip" || strings.HasPrefix(part, "gzip;") { + return gzip.NewWriter(writer), "gzip" + } + } + return writer, "" +} + +// LogFunc is an adapter to allow the use of any function as a promhttp.Logger. If f is a function, LogFunc(f) is a +// promhttp.Logger that calls f. +type LogFunc func(args ...interface{}) + +// Println implements promhttp.Logger. +func (log LogFunc) Println(args ...interface{}) { + log(args) +} + +// OfBool returns bool address. 
+func OfBool(i bool) *bool { + return &i +} diff --git a/collector.go b/collector.go index edd90edf..d2ad144c 100644 --- a/collector.go +++ b/collector.go @@ -4,14 +4,13 @@ import ( "context" "database/sql" "fmt" - "strings" + "log/slog" "sync" "time" "github.com/burningalchemist/sql_exporter/config" "github.com/burningalchemist/sql_exporter/errors" dto "github.com/prometheus/client_model/go" - "k8s.io/klog/v2" ) // Collector is a self-contained group of SQL queries and metric families to collect from a specific database. It is @@ -31,13 +30,7 @@ type collector struct { // NewCollector returns a new Collector with the given configuration and database. The metrics it creates will all have // the provided const labels applied. func NewCollector(logContext string, cc *config.CollectorConfig, constLabels []*dto.LabelPair) (Collector, errors.WithContext) { - logContext = fmt.Sprintf("%s, collector=%q", logContext, cc.Name) - - // Leading comma appears when target name is undefined, which is a side-effect of running in single target mode. - // Let's trim to avoid confusions. - if strings.HasPrefix(logContext, ",") { - logContext = strings.TrimLeft(logContext, ", ") - } + logContext = TrimMissingCtx(fmt.Sprintf(`%s,collector=%s`, logContext, cc.Name)) // Maps each query to the list of metric families it populates. 
queryMFs := make(map[*config.QueryConfig][]*MetricFamily, len(cc.Metrics)) @@ -71,7 +64,7 @@ func NewCollector(logContext string, cc *config.CollectorConfig, constLabels []* logContext: logContext, } if c.config.MinInterval > 0 { - klog.V(2).Infof("[%s] Non-zero min_interval (%s), using cached collector.", logContext, c.config.MinInterval) + slog.Warn("Non-zero min_interval, using cached collector.", "logContext", logContext, "min_interval", c.config.MinInterval) return newCachingCollector(&c), nil } return &c, nil @@ -121,15 +114,14 @@ func (cc *cachingCollector) Collect(ctx context.Context, conn *sql.DB, ch chan<- ch <- NewInvalidMetric(errors.Wrap(cc.rawColl.logContext, ctx.Err())) return } - + slog.Debug("Cache size", "length", len(cc.cache)) collTime := time.Now() select { case cacheTime := <-cc.cacheSem: // Have the lock. - if age := collTime.Sub(cacheTime); age > cc.minInterval { + if age := collTime.Sub(cacheTime); age > cc.minInterval || len(cc.cache) == 0 { // Cache contents are older than minInterval, collect fresh metrics, cache them and pipe them through. 
- klog.V(2).Infof("[%s] Collecting fresh metrics: min_interval=%.3fs cache_age=%.3fs", - cc.rawColl.logContext, cc.minInterval.Seconds(), age.Seconds()) + slog.Debug("Collecting fresh metrics", "logContext", cc.rawColl.logContext, "min_interval", cc.minInterval.Seconds(), "cache_age", age.Seconds()) cacheChan := make(chan Metric, capMetricChan) cc.cache = make([]Metric, 0, len(cc.cache)) go func() { @@ -137,13 +129,19 @@ func (cc *cachingCollector) Collect(ctx context.Context, conn *sql.DB, ch chan<- close(cacheChan) }() for metric := range cacheChan { + // catch invalid metrics and return them immediately, don't cache them + if ctx.Err() != nil { + slog.Debug("Context closed, returning invalid metric", "logContext", cc.rawColl.logContext) + ch <- NewInvalidMetric(errors.Wrap(cc.rawColl.logContext, ctx.Err())) + continue + } + cc.cache = append(cc.cache, metric) ch <- metric } cacheTime = collTime } else { - klog.V(2).Infof("[%s] Returning cached metrics: min_interval=%.3fs cache_age=%.3fs", - cc.rawColl.logContext, cc.minInterval.Seconds(), age.Seconds()) + slog.Debug("Returning cached metrics", "logContext", cc.rawColl.logContext, "min_interval", cc.minInterval.Seconds(), "cache_age", age.Seconds()) for _, metric := range cc.cache { ch <- metric } diff --git a/config/collector_config.go b/config/collector_config.go new file mode 100644 index 00000000..4385ad22 --- /dev/null +++ b/config/collector_config.go @@ -0,0 +1,62 @@ +package config + +import ( + "fmt" + + "github.com/prometheus/common/model" +) + +// +// Collectors +// + +// CollectorConfig defines a set of metrics and how they are collected. 
+type CollectorConfig struct { + Name string `yaml:"collector_name"` // name of this collector + MinInterval model.Duration `yaml:"min_interval,omitempty"` // minimum interval between query executions + Metrics []*MetricConfig `yaml:"metrics"` // metrics/queries defined by this collector + Queries []*QueryConfig `yaml:"queries,omitempty"` // named queries defined by this collector + + // Catches all undefined fields and must be empty after parsing. + XXX map[string]any `yaml:",inline" json:"-"` +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface for CollectorConfig. +func (c *CollectorConfig) UnmarshalYAML(unmarshal func(any) error) error { + // Default to undefined (a negative value) so it can be overridden by the global default when not explicitly set. + c.MinInterval = -1 + + type plain CollectorConfig + if err := unmarshal((*plain)(c)); err != nil { + return err + } + + if len(c.Metrics) == 0 { + return fmt.Errorf("no metrics defined for collector %q", c.Name) + } + + // Set metric.query for all metrics: resolve query references (if any) and generate QueryConfigs for literal queries. + queries := make(map[string]*QueryConfig, len(c.Queries)) + for _, query := range c.Queries { + queries[query.Name] = query + } + for _, metric := range c.Metrics { + if metric.QueryRef != "" { + query, found := queries[metric.QueryRef] + if !found { + return fmt.Errorf("unresolved query_ref %q in metric %q of collector %q", metric.QueryRef, metric.Name, c.Name) + } + metric.query = query + query.metrics = append(query.metrics, metric) + } else { + // For literal queries generate a QueryConfig with a name based off collector and metric name. 
+ metric.query = &QueryConfig{ + Name: metric.Name, + Query: metric.QueryLiteral, + NoPreparedStatement: metric.NoPreparedStatement, + } + } + } + + return checkOverflow(c.XXX, "collector") +} diff --git a/config/config.go b/config/config.go index aa6bd466..31847ffe 100644 --- a/config/config.go +++ b/config/config.go @@ -1,25 +1,38 @@ package config import ( + "context" "fmt" + "log/slog" "os" "path/filepath" - "strings" - "time" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/common/model" + "github.com/sethvargo/go-envconfig" "gopkg.in/yaml.v3" - "k8s.io/klog/v2" ) // MaxInt32 defines the maximum value of allowed integers // and serves to help us avoid overflow/wraparound issues. const MaxInt32 int = 1<<31 - 1 +// EnvPrefix is the prefix for environment variables. +const ( + EnvPrefix string = "SQLEXPORTER_" + + EnvConfigFile string = EnvPrefix + "CONFIG" + EnvDebug string = EnvPrefix + "DEBUG" +) + +var ( + EnablePing bool + IgnoreMissingVals bool + DsnOverride string + TargetLabel string +) + // Load attempts to parse the given config file and return a Config object. func Load(configFile string) (*Config, error) { - klog.Infof("Loading configuration from %s", configFile) + slog.Debug("Loading configuration", "file", configFile) buf, err := os.ReadFile(configFile) if err != nil { return nil, err @@ -31,6 +44,10 @@ func Load(configFile string) (*Config, error) { return nil, err } + if c.Globals == nil { + return nil, fmt.Errorf("empty or no configuration provided") + } + return &c, nil } @@ -40,27 +57,27 @@ func Load(configFile string) (*Config, error) { // Config is a collection of jobs and collectors. 
type Config struct { - Globals *GlobalConfig `yaml:"global"` - CollectorFiles []string `yaml:"collector_files,omitempty"` - Target *TargetConfig `yaml:"target,omitempty"` + Globals *GlobalConfig `yaml:"global,omitempty" env:", prefix=GLOBAL_"` + CollectorFiles []string `yaml:"collector_files,omitempty" env:"COLLECTOR_FILES"` + Target *TargetConfig `yaml:"target,omitempty" env:", prefix=TARGET_"` Jobs []*JobConfig `yaml:"jobs,omitempty"` Collectors []*CollectorConfig `yaml:"collectors,omitempty"` configFile string // Catches all undefined fields and must be empty after parsing. - XXX map[string]interface{} `yaml:",inline" json:"-"` + XXX map[string]any `yaml:",inline" json:"-"` } // UnmarshalYAML implements the yaml.Unmarshaler interface for Config. -func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error { - type plain Config - if err := unmarshal((*plain)(c)); err != nil { +func (c *Config) UnmarshalYAML(unmarshal func(any) error) error { + // unmarshalConfig does the actual unmarshalling + if err := c.unmarshalConfig(unmarshal); err != nil { return err } - - if (len(c.Jobs) == 0) == (c.Target == nil) { - return fmt.Errorf("exactly one of `jobs` and `target` must be defined") + // Populate global defaults. + if err := c.populateGlobalDefaults(); err != nil { + return err } // Load any externally defined collectors. @@ -68,10 +85,63 @@ func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error { return err } + // Process environment variables. + if err := c.processEnvConfig(); err != nil { + return err + } + + // Check required fields + if err := c.checkRequiredFields(); err != nil { + return err + } + // Populate collector references for the target/jobs. + if err := c.populateCollectorReferences(); err != nil { + return err + } + + return checkOverflow(c.XXX, "config") +} + +// unmarshalConfig unmarshals the config, but does not populate global defaults, process environment variables, or check required fields. 
+func (c *Config) unmarshalConfig(unmarshal func(any) error) error { + type plain Config + return unmarshal((*plain)(c)) +} + +// populateGlobalDefaults populates any unset global defaults. +func (c *Config) populateGlobalDefaults() error { + if c.Globals == nil { + c.Globals = &GlobalConfig{} + // Force a dummy unmarshall to populate global defaults + return c.Globals.UnmarshalYAML(func(any) error { return nil }) + } + return nil +} + +// processEnvConfig processes environment variables. +func (c *Config) processEnvConfig() error { + return envconfig.ProcessWith(context.Background(), &envconfig.Config{ + Target: c, + Lookuper: envconfig.PrefixLookuper(EnvPrefix, envconfig.OsLookuper()), + DefaultNoInit: true, + DefaultOverwrite: true, + DefaultDelimiter: ";", + }) +} + +// checkRequiredFields checks that all required fields are present. +func (c *Config) checkRequiredFields() error { + if (len(c.Jobs) == 0) == (c.Target == nil) { + return fmt.Errorf("exactly one of `jobs` and `target` must be defined") + } + return nil +} + +// populateCollectorReferences populates collector references for the target/jobs. +func (c *Config) populateCollectorReferences() error { colls := make(map[string]*CollectorConfig) for _, coll := range c.Collectors { - // Set the min interval to the global default if not explicitly set. 
if coll.MinInterval < 0 { coll.MinInterval = c.Globals.MinInterval } @@ -80,6 +150,7 @@ func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error { } colls[coll.Name] = coll } + if c.Target != nil { cs, err := resolveCollectorRefs(c.Target.CollectorRefs, colls, "target") if err != nil { @@ -87,6 +158,7 @@ func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error { } c.Target.collectors = cs } + for _, j := range c.Jobs { cs, err := resolveCollectorRefs(j.CollectorRefs, colls, fmt.Sprintf("job %q", j.Name)) if err != nil { @@ -94,8 +166,7 @@ func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error { } j.collectors = cs } - - return checkOverflow(c.XXX, "config") + return nil } // YAML marshals the config into YAML format. @@ -103,19 +174,7 @@ func (c *Config) YAML() ([]byte, error) { return yaml.Marshal(c) } -// ReloadCollectorFiles reloads previously loaded collector files -func (c *Config) ReloadCollectorFiles() error { - if len(c.Collectors) > 0 { - c.Collectors = c.Collectors[:0] - } - err := c.loadCollectorFiles() - if err != nil { - return err - } - return nil -} - -// LoadCollectorFiles resolves all collector file globs to files and loads the collectors they define. +// loadCollectorFiles resolves all collector file globs to files and loads the collectors they define. func (c *Config) loadCollectorFiles() error { baseDir := filepath.Dir(c.configFile) for _, cfglob := range c.CollectorFiles { @@ -126,6 +185,7 @@ func (c *Config) loadCollectorFiles() error { // Resolve the glob to actual filenames. cfs, err := filepath.Glob(cfglob) + slog.Debug("External collector files found", "count", len(cfs), "glob", cfglob) if err != nil { // The only error can be a bad pattern. 
return fmt.Errorf("error resolving collector files for %s: %w", cfglob, err) @@ -145,435 +205,9 @@ func (c *Config) loadCollectorFiles() error { } c.Collectors = append(c.Collectors, &cc) - klog.Infof("Loaded collector '%s' from %s", cc.Name, cf) - } - } - - return nil -} - -// GlobalConfig contains globally applicable defaults. -type GlobalConfig struct { - MinInterval model.Duration `yaml:"min_interval"` // minimum interval between query executions, default is 0 - ScrapeTimeout model.Duration `yaml:"scrape_timeout"` // per-scrape timeout, global - TimeoutOffset model.Duration `yaml:"scrape_timeout_offset"` // offset to subtract from timeout in seconds - MaxConnLifetime time.Duration `yaml:"max_connection_lifetime"` // maximum amount of time a connection may be reused to any one target - MaxConns int `yaml:"max_connections"` // maximum number of open connections to any one target - MaxIdleConns int `yaml:"max_idle_connections"` // maximum number of idle connections to any one target - - // Catches all undefined fields and must be empty after parsing. - XXX map[string]interface{} `yaml:",inline" json:"-"` -} - -// UnmarshalYAML implements the yaml.Unmarshaler interface for GlobalConfig. -func (g *GlobalConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { - // Default to running the queries on every scrape. - g.MinInterval = model.Duration(0) - // Default to 10 seconds, since Prometheus has a 10 second scrape timeout default. - g.ScrapeTimeout = model.Duration(10 * time.Second) - // Default to .5 seconds. 
- g.TimeoutOffset = model.Duration(500 * time.Millisecond) - g.MaxConns = 3 - g.MaxIdleConns = 3 - g.MaxConnLifetime = time.Duration(0) - - type plain GlobalConfig - if err := unmarshal((*plain)(g)); err != nil { - return err - } - - if g.TimeoutOffset <= 0 { - return fmt.Errorf("global.scrape_timeout_offset must be strictly positive, have %s", g.TimeoutOffset) - } - - return checkOverflow(g.XXX, "global") -} - -// -// Target -// - -// TargetConfig defines a DSN and a set of collectors to be executed on it. -type TargetConfig struct { - DSN Secret `yaml:"data_source_name"` // data source name to connect to - CollectorRefs []string `yaml:"collectors"` // names of collectors to execute on the target - - collectors []*CollectorConfig // resolved collector references - - // Catches all undefined fields and must be empty after parsing. - XXX map[string]interface{} `yaml:",inline" json:"-"` -} - -// Collectors returns the collectors referenced by the target, resolved. -func (t *TargetConfig) Collectors() []*CollectorConfig { - return t.collectors -} - -// UnmarshalYAML implements the yaml.Unmarshaler interface for TargetConfig. -func (t *TargetConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { - type plain TargetConfig - if err := unmarshal((*plain)(t)); err != nil { - return err - } - - // Check required fields - if t.DSN == "" { - return fmt.Errorf("missing data_source_name for target %+v", t) - } - if err := checkCollectorRefs(t.CollectorRefs, "target"); err != nil { - return err - } - - return checkOverflow(t.XXX, "target") -} - -// -// Jobs -// - -// JobConfig defines a set of collectors to be executed on a set of targets. 
-type JobConfig struct { - Name string `yaml:"job_name"` // name of this job - CollectorRefs []string `yaml:"collectors"` // names of collectors to apply to all targets in this job - StaticConfigs []*StaticConfig `yaml:"static_configs"` // collections of statically defined targets - - collectors []*CollectorConfig // resolved collector references - - // Catches all undefined fields and must be empty after parsing. - XXX map[string]interface{} `yaml:",inline" json:"-"` -} - -// Collectors returns the collectors referenced by the job, resolved. -func (j *JobConfig) Collectors() []*CollectorConfig { - return j.collectors -} - -// UnmarshalYAML implements the yaml.Unmarshaler interface for JobConfig. -func (j *JobConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { - type plain JobConfig - if err := unmarshal((*plain)(j)); err != nil { - return err - } - - // Check required fields - if j.Name == "" { - return fmt.Errorf("missing name for job %+v", j) - } - if err := checkCollectorRefs(j.CollectorRefs, fmt.Sprintf("job %q", j.Name)); err != nil { - return err - } - - if len(j.StaticConfigs) == 0 { - return fmt.Errorf("no targets defined for job %q", j.Name) - } - - return checkOverflow(j.XXX, "job") -} - -// checkLabelCollisions checks for label collisions between StaticConfig labels and Metric labels. 
-// -//lint:ignore U1000 - it's unused so far -func (j *JobConfig) checkLabelCollisions() error { - sclabels := make(map[string]interface{}) - for _, s := range j.StaticConfigs { - for _, l := range s.Labels { - sclabels[l] = nil - } - } - - for _, c := range j.collectors { - for _, m := range c.Metrics { - for _, l := range m.KeyLabels { - if _, ok := sclabels[l]; ok { - return fmt.Errorf( - "label collision in job %q: label %q is defined both by a static_config and by metric %q of collector %q", - j.Name, l, m.Name, c.Name) - } - } - } - } - return nil -} - -// StaticConfig defines a set of targets and optional labels to apply to the metrics collected from them. -type StaticConfig struct { - Targets map[string]Secret `yaml:"targets"` // map of target names to data source names - Labels map[string]string `yaml:"labels,omitempty"` // labels to apply to all metrics collected from the targets - - // Catches all undefined fields and must be empty after parsing. - XXX map[string]interface{} `yaml:",inline" json:"-"` -} - -// UnmarshalYAML implements the yaml.Unmarshaler interface for StaticConfig. 
-func (s *StaticConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { - type plain StaticConfig - if err := unmarshal((*plain)(s)); err != nil { - return err - } - - // Check for empty/duplicate target names/data source names - tnames := make(map[string]interface{}) - dsns := make(map[string]interface{}) - for tname, dsn := range s.Targets { - if tname == "" { - return fmt.Errorf("empty target name in static config %+v", s) - } - if _, ok := tnames[tname]; ok { - return fmt.Errorf("duplicate target name %q in static_config %+v", tname, s) - } - tnames[tname] = nil - if dsn == "" { - return fmt.Errorf("empty data source name in static config %+v", s) - } - if _, ok := dsns[string(dsn)]; ok { - return fmt.Errorf("duplicate data source name %q in static_config %+v", tname, s) - } - dsns[string(dsn)] = nil - } - - return checkOverflow(s.XXX, "static_config") -} - -// -// Collectors -// - -// CollectorConfig defines a set of metrics and how they are collected. -type CollectorConfig struct { - Name string `yaml:"collector_name"` // name of this collector - MinInterval model.Duration `yaml:"min_interval,omitempty"` // minimum interval between query executions - Metrics []*MetricConfig `yaml:"metrics"` // metrics/queries defined by this collector - Queries []*QueryConfig `yaml:"queries,omitempty"` // named queries defined by this collector - - // Catches all undefined fields and must be empty after parsing. - XXX map[string]interface{} `yaml:",inline" json:"-"` -} - -// UnmarshalYAML implements the yaml.Unmarshaler interface for CollectorConfig. -func (c *CollectorConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { - // Default to undefined (a negative value) so it can be overridden by the global default when not explicitly set. 
- c.MinInterval = -1 - - type plain CollectorConfig - if err := unmarshal((*plain)(c)); err != nil { - return err - } - - if len(c.Metrics) == 0 { - return fmt.Errorf("no metrics defined for collector %q", c.Name) - } - - // Set metric.query for all metrics: resolve query references (if any) and generate QueryConfigs for literal queries. - queries := make(map[string]*QueryConfig, len(c.Queries)) - for _, query := range c.Queries { - queries[query.Name] = query - } - for _, metric := range c.Metrics { - if metric.QueryRef != "" { - query, found := queries[metric.QueryRef] - if !found { - return fmt.Errorf("unresolved query_ref %q in metric %q of collector %q", metric.QueryRef, metric.Name, c.Name) - } - metric.query = query - query.metrics = append(query.metrics, metric) - } else { - // For literal queries generate a QueryConfig with a name based off collector and metric name. - metric.query = &QueryConfig{ - Name: metric.Name, - Query: metric.QueryLiteral, - } - } - } - - return checkOverflow(c.XXX, "collector") -} - -// MetricConfig defines a Prometheus metric, the SQL query to populate it and the mapping of columns to metric -// keys/values. 
-type MetricConfig struct { - Name string `yaml:"metric_name"` // the Prometheus metric name - TypeString string `yaml:"type"` // the Prometheus metric type - Help string `yaml:"help"` // the Prometheus metric help text - KeyLabels []string `yaml:"key_labels,omitempty"` // expose these columns as labels from SQL - StaticLabels map[string]string `yaml:"static_labels,omitempty"` // fixed key/value pairs as static labels - ValueLabel string `yaml:"value_label,omitempty"` // with multiple value columns, map their names under this label - Values []string `yaml:"values"` // expose each of these columns as a value, keyed by column name - QueryLiteral string `yaml:"query,omitempty"` // a literal query - QueryRef string `yaml:"query_ref,omitempty"` // references a query in the query map - - valueType prometheus.ValueType // TypeString converted to prometheus.ValueType - query *QueryConfig // QueryConfig resolved from QueryRef or generated from Query - - // Catches all undefined fields and must be empty after parsing. - XXX map[string]interface{} `yaml:",inline" json:"-"` -} - -// ValueType returns the metric type, converted to a prometheus.ValueType. -func (m *MetricConfig) ValueType() prometheus.ValueType { - return m.valueType -} - -// Query returns the query defined (as a literal) or referenced by the metric. -func (m *MetricConfig) Query() *QueryConfig { - return m.query -} - -// UnmarshalYAML implements the yaml.Unmarshaler interface for MetricConfig. 
-func (m *MetricConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { - type plain MetricConfig - if err := unmarshal((*plain)(m)); err != nil { - return err - } - - // Check required fields - if m.Name == "" { - return fmt.Errorf("missing name for metric %+v", m) - } - if m.TypeString == "" { - return fmt.Errorf("missing type for metric %q", m.Name) - } - if m.Help == "" { - return fmt.Errorf("missing help for metric %q", m.Name) - } - if (m.QueryLiteral == "") == (m.QueryRef == "") { - return fmt.Errorf("exactly one of query and query_ref must be specified for metric %q", m.Name) - } - - switch strings.ToLower(m.TypeString) { - case "counter": - m.valueType = prometheus.CounterValue - case "gauge": - m.valueType = prometheus.GaugeValue - default: - return fmt.Errorf("unsupported metric type: %s", m.TypeString) - } - - // Check for duplicate key labels - for i, li := range m.KeyLabels { - if err := checkLabel(li, "metric", m.Name); err != nil { - return err - } - for _, lj := range m.KeyLabels[i+1:] { - if li == lj { - return fmt.Errorf("duplicate key label %q for metric %q", li, m.Name) - } + slog.Debug("Loaded collector", "name", cc.Name, "file", cf) } - if m.ValueLabel == li { - return fmt.Errorf("duplicate label %q (defined in both key_labels and value_label) for metric %q", li, m.Name) - } - } - - if len(m.Values) == 0 { - return fmt.Errorf("no values defined for metric %q", m.Name) } - if len(m.Values) > 1 { - // Multiple value columns but no value label to identify them - if m.ValueLabel == "" { - return fmt.Errorf("value_label must be defined for metric with multiple values %q", m.Name) - } - if err := checkLabel(m.ValueLabel, "value_label for metric", m.Name); err != nil { - return err - } - } - - return checkOverflow(m.XXX, "metric") -} - -// QueryConfig defines a named query, to be referenced by one or multiple metrics. 
-type QueryConfig struct { - Name string `yaml:"query_name"` // the query name, to be referenced via `query_ref` - Query string `yaml:"query"` // the named query - - metrics []*MetricConfig // metrics referencing this query - - // Catches all undefined fields and must be empty after parsing. - XXX map[string]interface{} `yaml:",inline" json:"-"` -} - -// UnmarshalYAML implements the yaml.Unmarshaler interface for QueryConfig. -func (q *QueryConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { - type plain QueryConfig - if err := unmarshal((*plain)(q)); err != nil { - return err - } - - // Check required fields - if q.Name == "" { - return fmt.Errorf("missing name for query %+v", *q) - } - if q.Query == "" { - return fmt.Errorf("missing query literal for query %q", q.Name) - } - - q.metrics = make([]*MetricConfig, 0, 2) - - return checkOverflow(q.XXX, "metric") -} - -// Secret special type for storing secrets. -type Secret string - -// UnmarshalYAML implements the yaml.Unmarshaler interface for Secrets. -func (s *Secret) UnmarshalYAML(unmarshal func(interface{}) error) error { - type plain Secret - return unmarshal((*plain)(s)) -} - -// MarshalYAML implements the yaml.Marshaler interface for Secrets. 
-func (s Secret) MarshalYAML() (interface{}, error) { - if s != "" { - return "", nil - } - return nil, nil -} - -func checkCollectorRefs(collectorRefs []string, ctx string) error { - // At least one collector, no duplicates - if len(collectorRefs) == 0 { - return fmt.Errorf("no collectors defined for %s", ctx) - } - for i, ci := range collectorRefs { - for _, cj := range collectorRefs[i+1:] { - if ci == cj { - return fmt.Errorf("duplicate collector reference %q in %s", ci, ctx) - } - } - } - return nil -} - -func resolveCollectorRefs( - collectorRefs []string, collectors map[string]*CollectorConfig, ctx string, -) ([]*CollectorConfig, error) { - resolved := make([]*CollectorConfig, 0, len(collectorRefs)) - for _, cref := range collectorRefs { - c, found := collectors[cref] - if !found { - return nil, fmt.Errorf("unknown collector %q referenced in %s", cref, ctx) - } - resolved = append(resolved, c) - } - return resolved, nil -} - -func checkLabel(label string, ctx ...string) error { - if label == "" { - return fmt.Errorf("empty label defined in %s", strings.Join(ctx, " ")) - } - if label == "job" || label == "instance" { - return fmt.Errorf("reserved label %q redefined in %s", label, strings.Join(ctx, " ")) - } - return nil -} - -func checkOverflow(m map[string]interface{}, ctx string) error { - if len(m) > 0 { - var keys []string - for k := range m { - keys = append(keys, k) - } - return fmt.Errorf("unknown fields in %s: %s", ctx, strings.Join(keys, ", ")) - } return nil } diff --git a/config/config_test.go b/config/config_test.go new file mode 100644 index 00000000..864ae66e --- /dev/null +++ b/config/config_test.go @@ -0,0 +1,90 @@ +package config + +import ( + "reflect" + "testing" +) + +func TestResolveCollectorRefs(t *testing.T) { + colls := map[string]*CollectorConfig{ + "a": {Name: "a"}, + "b": {Name: "b"}, + "c": {Name: "b"}, + "aa": {Name: "aa"}, + } + + t.Run("NoGlobbing", func(t *testing.T) { + crefs := []string{ + "a", + "b", + } + cs, err := 
resolveCollectorRefs(crefs, colls, "target") + if err != nil { + t.Fatalf("expected no error but got: %v", err) + } + if len(cs) != 2 { + t.Fatalf("expected len(cs)=2 but got len(cs)=%d", len(cs)) + } + expected := []*CollectorConfig{ + colls["a"], + colls["b"], + } + if !reflect.DeepEqual(cs, expected) { + t.Fatalf("expected cs=%v but got cs=%v", expected, cs) + } + }) + + t.Run("Globbing", func(t *testing.T) { + crefs := []string{ + "a*", + "b", + } + cs, err := resolveCollectorRefs(crefs, colls, "target") + if err != nil { + t.Fatalf("expected no error but got: %v", err) + } + if len(cs) != 3 { + t.Fatalf("expected len(cs)=3 but got len(cs)=%d", len(cs)) + } + expected1 := []*CollectorConfig{ + colls["a"], + colls["aa"], + colls["b"], + } + expected2 := []*CollectorConfig{ // filepath.Match() is non-deterministic + colls["aa"], + colls["a"], + colls["b"], + } + if !reflect.DeepEqual(cs, expected1) && !reflect.DeepEqual(cs, expected2) { + t.Fatalf("expected cs=%v or cs=%v but got cs=%v", expected1, expected2, cs) + } + }) + + t.Run("NoCollectorRefs", func(t *testing.T) { + crefs := []string{} + cs, err := resolveCollectorRefs(crefs, colls, "target") + if err != nil { + t.Fatalf("expected no error but got: %v", err) + } + if len(cs) != 0 { + t.Fatalf("expected len(cs)=0 but got len(cs)=%d", len(cs)) + } + }) + + t.Run("UnknownCollector", func(t *testing.T) { + crefs := []string{ + "a", + "x", + } + _, err := resolveCollectorRefs(crefs, colls, "target") + if err == nil { + t.Fatalf("expected error but got none") + } + // TODO: Code should use error types and check with 'errors.Is(err1, err2)'. 
+ expected := "unknown collector \"x\" referenced in target" + if err.Error() != expected { + t.Fatalf("expected err=%q but got err=%q", expected, err.Error()) + } + }) +} diff --git a/config/global_config.go b/config/global_config.go new file mode 100644 index 00000000..adf6562d --- /dev/null +++ b/config/global_config.go @@ -0,0 +1,49 @@ +package config + +import ( + "fmt" + "time" + + "github.com/prometheus/common/model" +) + +// GlobalConfig contains globally applicable defaults. +type GlobalConfig struct { + MinInterval model.Duration `yaml:"min_interval" env:"MIN_INTERVAL"` // minimum interval between query executions, default is 0 + ScrapeTimeout model.Duration `yaml:"scrape_timeout" env:"SCRAPE_TIMEOUT"` // per-scrape timeout, global + TimeoutOffset model.Duration `yaml:"scrape_timeout_offset" env:"SCRAPE_TIMEOUT_OFFSET"` // offset to subtract from timeout in seconds + ScrapeErrorDropInterval model.Duration `yaml:"scrape_error_drop_interval" env:"SCRAPE_ERROR_DROP_INTERVAL"` // interval to drop scrape errors from the error counter, default is 0 + MaxConnLifetime time.Duration `yaml:"max_connection_lifetime" env:"MAX_CONNECTION_LIFETIME"` // maximum amount of time a connection may be reused to any one target + + MaxConns int `yaml:"max_connections" env:"MAX_CONNECTIONS"` // maximum number of open connections to any one target + MaxIdleConns int `yaml:"max_idle_connections" env:"MAX_IDLE_CONNECTIONS"` // maximum number of idle connections to any one target + + // Catches all undefined fields and must be empty after parsing. + XXX map[string]any `yaml:",inline" json:"-"` +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface for GlobalConfig. +func (g *GlobalConfig) UnmarshalYAML(unmarshal func(any) error) error { + // Default to running the queries on every scrape. + g.MinInterval = model.Duration(0) + // Default to 10 seconds, since Prometheus has a 10 second scrape timeout default. 
+ g.ScrapeTimeout = model.Duration(10 * time.Second) + // Default to 0 for scrape error drop interval. + g.ScrapeErrorDropInterval = model.Duration(0) + // Default to .5 seconds. + g.TimeoutOffset = model.Duration(500 * time.Millisecond) + g.MaxConns = 3 + g.MaxIdleConns = 3 + g.MaxConnLifetime = time.Duration(0) + + type plain GlobalConfig + if err := unmarshal((*plain)(g)); err != nil { + return err + } + + if g.TimeoutOffset <= 0 { + return fmt.Errorf("global.scrape_timeout_offset must be strictly positive, have %s", g.TimeoutOffset) + } + + return checkOverflow(g.XXX, "global") +} diff --git a/config/job_config.go b/config/job_config.go new file mode 100644 index 00000000..1bd1a25d --- /dev/null +++ b/config/job_config.go @@ -0,0 +1,112 @@ +package config + +import "fmt" + +// +// Jobs +// + +// JobConfig defines a set of collectors to be executed on a set of targets. +type JobConfig struct { + Name string `yaml:"job_name"` // name of this job + CollectorRefs []string `yaml:"collectors"` // names of collectors to apply to all targets in this job + StaticConfigs []*StaticConfig `yaml:"static_configs"` // collections of statically defined targets + + collectors []*CollectorConfig // resolved collector references + + EnablePing *bool `yaml:"enable_ping,omitempty"` // ping the target before executing the collectors + + // Catches all undefined fields and must be empty after parsing. + XXX map[string]any `yaml:",inline" json:"-"` +} + +// Collectors returns the collectors referenced by the job, resolved. +func (j *JobConfig) Collectors() []*CollectorConfig { + return j.collectors +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface for JobConfig. 
+func (j *JobConfig) UnmarshalYAML(unmarshal func(any) error) error { + type plain JobConfig + if err := unmarshal((*plain)(j)); err != nil { + return err + } + + // Check required fields + if j.Name == "" { + return fmt.Errorf("missing name for job %+v", j) + } + if err := checkCollectorRefs(j.CollectorRefs, fmt.Sprintf("job %q", j.Name)); err != nil { + return err + } + + if len(j.StaticConfigs) == 0 { + return fmt.Errorf("no targets defined for job %q", j.Name) + } + + return checkOverflow(j.XXX, "job") +} + +// checkLabelCollisions checks for label collisions between StaticConfig labels and Metric labels. +// +//lint:ignore U1000 - it's unused so far +func (j *JobConfig) checkLabelCollisions() error { + sclabels := make(map[string]any) + for _, s := range j.StaticConfigs { + for _, l := range s.Labels { + sclabels[l] = nil + } + } + + for _, c := range j.collectors { + for _, m := range c.Metrics { + for _, l := range m.KeyLabels { + if _, ok := sclabels[l]; ok { + return fmt.Errorf( + "label collision in job %q: label %q is defined both by a static_config and by metric %q of collector %q", + j.Name, l, m.Name, c.Name) + } + } + } + } + return nil +} + +// StaticConfig defines a set of targets and optional labels to apply to the metrics collected from them. +type StaticConfig struct { + Targets map[string]Secret `yaml:"targets"` // map of target names to data source names + Labels map[string]string `yaml:"labels,omitempty"` // labels to apply to all metrics collected from the targets + + // Catches all undefined fields and must be empty after parsing. + XXX map[string]any `yaml:",inline" json:"-"` +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface for StaticConfig. 
+func (s *StaticConfig) UnmarshalYAML(unmarshal func(any) error) error { + type plain StaticConfig + if err := unmarshal((*plain)(s)); err != nil { + return err + } + + // Check for empty/duplicate target names/data source names + tnames := make(map[string]any) + dsns := make(map[string]any) + for tname, dsn := range s.Targets { + if tname == "" { + return fmt.Errorf("empty target name in static config %+v", s) + } + if _, ok := tnames[tname]; ok { + return fmt.Errorf("duplicate target name %q in static_config %+v", tname, s) + } + tnames[tname] = nil + if dsn == "" { + return fmt.Errorf("empty data source name in static config %+v", s) + } + if _, ok := dsns[string(dsn)]; ok { + return fmt.Errorf("duplicate data source name %q in static_config %+v", tname, s) + } + dsns[string(dsn)] = nil + } + + return checkOverflow(s.XXX, "static_config") +} diff --git a/config/metric_config.go b/config/metric_config.go new file mode 100644 index 00000000..67a9b2ec --- /dev/null +++ b/config/metric_config.go @@ -0,0 +1,139 @@ +package config + +import ( + "fmt" + "strings" + + "github.com/prometheus/client_golang/prometheus" +) + +// MetricConfig defines a Prometheus metric, the SQL query to populate it and the mapping of columns to metric +// keys/values. 
type MetricConfig struct {
	Name         string            `yaml:"metric_name"`             // the Prometheus metric name
	TypeString   string            `yaml:"type"`                    // the Prometheus metric type
	Help         string            `yaml:"help"`                    // the Prometheus metric help text
	KeyLabels    []string          `yaml:"key_labels,omitempty"`    // expose these columns as labels from SQL
	StaticLabels map[string]string `yaml:"static_labels,omitempty"` // fixed key/value pairs as static labels
	ValueLabel   string            `yaml:"value_label,omitempty"`   // with multiple value columns, map their names under this label
	Values       []string          `yaml:"values"`                  // expose each of these columns as a value, keyed by column name
	QueryLiteral string            `yaml:"query,omitempty"`         // a literal query
	QueryRef     string            `yaml:"query_ref,omitempty"`     // references a query in the query map

	NoPreparedStatement bool     `yaml:"no_prepared_statement,omitempty"` // do not prepare statement
	StaticValue         *float64 `yaml:"static_value,omitempty"`          // fixed metric value; mutually exclusive with Values (see validateValues)
	TimestampValue      string   `yaml:"timestamp_value,omitempty"`       // optional column name containing a valid timestamp value

	valueType prometheus.ValueType // TypeString converted to prometheus.ValueType
	query     *QueryConfig         // QueryConfig resolved from QueryRef or generated from Query

	// Catches all undefined fields and must be empty after parsing.
	XXX map[string]any `yaml:",inline" json:"-"`
}

// ValueType returns the metric type, converted to a prometheus.ValueType.
// It is populated during UnmarshalYAML from the "type" field.
func (m *MetricConfig) ValueType() prometheus.ValueType {
	return m.valueType
}

// Query returns the query defined (as a literal) or referenced by the metric.
func (m *MetricConfig) Query() *QueryConfig {
	return m.query
}

// UnmarshalYAML implements the yaml.Unmarshaler interface for MetricConfig.
+func (m *MetricConfig) UnmarshalYAML(unmarshal func(any) error) error { + type plain MetricConfig + if err := unmarshal((*plain)(m)); err != nil { + return err + } + + if err := m.validateRequiredFields(); err != nil { + return err + } + if err := m.setValueType(); err != nil { + return err + } + if err := m.validateKeyLabels(); err != nil { + return err + } + if err := m.validateValues(); err != nil { + return err + } + + return checkOverflow(m.XXX, "metric") +} + +// Check required fields +func (m *MetricConfig) validateRequiredFields() error { + if m.Name == "" { + return fmt.Errorf("missing name for metric %+v", m) + } + if m.TypeString == "" { + return fmt.Errorf("missing type for metric %q", m.Name) + } + if m.Help == "" { + return fmt.Errorf("missing help for metric %q", m.Name) + } + if (m.QueryLiteral == "") == (m.QueryRef == "") { + return fmt.Errorf("exactly one of query and query_ref must be specified for metric %q", m.Name) + } + + return nil +} + +// Set the metric type +func (m *MetricConfig) setValueType() error { + switch strings.ToLower(m.TypeString) { + case "counter": + m.valueType = prometheus.CounterValue + case "gauge": + m.valueType = prometheus.GaugeValue + default: + return fmt.Errorf("unsupported metric type: %s", m.TypeString) + } + + return nil +} + +// Check for duplicate key labels +func (m *MetricConfig) validateKeyLabels() error { + for i, li := range m.KeyLabels { + if err := checkLabel(li, "metric", m.Name); err != nil { + return err + } + for _, lj := range m.KeyLabels[i+1:] { + if li == lj { + return fmt.Errorf("duplicate key label %q for metric %q", li, m.Name) + } + } + if m.ValueLabel == li { + return fmt.Errorf("duplicate label %q (defined in both key_labels and value_label) for metric %q", li, m.Name) + } + } + + return nil +} + +// Check for duplicate values +func (m *MetricConfig) validateValues() error { + if len(m.Values) == 0 && m.StaticValue == nil { + return fmt.Errorf("no values defined for metric %q", m.Name) + } 
+ + if len(m.Values) > 0 && m.StaticValue != nil { + return fmt.Errorf("metric %q cannot have both static_value and values defined", m.Name) + } + + if len(m.Values) > 1 { + // Multiple value columns but no value label to identify them + if m.ValueLabel == "" { + return fmt.Errorf("value_label must be defined for metric with multiple values %q", m.Name) + } + if err := checkLabel(m.ValueLabel, "value_label for metric", m.Name); err != nil { + return err + } + } + + return nil +} diff --git a/config/query_config.go b/config/query_config.go new file mode 100644 index 00000000..983556e9 --- /dev/null +++ b/config/query_config.go @@ -0,0 +1,36 @@ +package config + +import "fmt" + +// QueryConfig defines a named query, to be referenced by one or multiple metrics. +type QueryConfig struct { + Name string `yaml:"query_name"` // the query name, to be referenced via `query_ref` + Query string `yaml:"query"` // the named query + + NoPreparedStatement bool `yaml:"no_prepared_statement,omitempty"` // do not prepare statement + + metrics []*MetricConfig // metrics referencing this query + + // Catches all undefined fields and must be empty after parsing. + XXX map[string]any `yaml:",inline" json:"-"` +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface for QueryConfig. +func (q *QueryConfig) UnmarshalYAML(unmarshal func(any) error) error { + type plain QueryConfig + if err := unmarshal((*plain)(q)); err != nil { + return err + } + + // Check required fields + if q.Name == "" { + return fmt.Errorf("missing name for query %+v", *q) + } + if q.Query == "" { + return fmt.Errorf("missing query literal for query %q", q.Name) + } + + q.metrics = make([]*MetricConfig, 0, 2) + + return checkOverflow(q.XXX, "metric") +} diff --git a/config/secret_config.go b/config/secret_config.go new file mode 100644 index 00000000..570024c6 --- /dev/null +++ b/config/secret_config.go @@ -0,0 +1,18 @@ +package config + +// Secret special type for storing secrets. 
type Secret string

// UnmarshalYAML implements the yaml.Unmarshaler interface for Secrets.
func (s *Secret) UnmarshalYAML(unmarshal func(any) error) error {
	// Decode via an alias type so this method is not invoked recursively.
	type plain Secret
	return unmarshal((*plain)(s))
}

// MarshalYAML implements the yaml.Marshaler interface for Secrets.
// Non-empty secrets are masked: they marshal as an empty string so the
// actual value is never written out. An empty secret marshals as null.
func (s Secret) MarshalYAML() (any, error) {
	if s != "" {
		return "", nil
	}
	return nil, nil
}
diff --git a/config/target_config.go b/config/target_config.go
new file mode 100644
index 00000000..ec25f51c
--- /dev/null
+++ b/config/target_config.go
@@ -0,0 +1,100 @@
package config

import (
	"context"
	"encoding/json"
	"fmt"
	"log/slog"
	"os"

	"github.com/aws/aws-sdk-go-v2/aws"
	awsConfig "github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/secretsmanager"
)

//
// Target
//

// TargetConfig defines a DSN and a set of collectors to be executed on it.
type TargetConfig struct {
	Name          string   `yaml:"name,omitempty" env:"NAME"`               // name of the target
	DSN           Secret   `yaml:"data_source_name" env:"DSN"`              // data source name to connect to
	AwsSecretName string   `yaml:"aws_secret_name" env:"AWS_SECRET_NAME"`   // AWS secret name; when set, the DSN is read from AWS Secrets Manager during unmarshaling
	CollectorRefs []string `yaml:"collectors" env:"COLLECTORS"`             // names of collectors to execute on the target
	EnablePing    *bool    `yaml:"enable_ping,omitempty" env:"ENABLE_PING"` // ping the target before executing the collectors

	collectors []*CollectorConfig // resolved collector references

	// Catches all undefined fields and must be empty after parsing.
	XXX map[string]any `yaml:",inline" json:"-"`
}

// Collectors returns the collectors referenced by the target, resolved.
func (t *TargetConfig) Collectors() []*CollectorConfig {
	return t.collectors
}

// UnmarshalYAML implements the yaml.Unmarshaler interface for TargetConfig.
+func (t *TargetConfig) UnmarshalYAML(unmarshal func(any) error) error { + type plain TargetConfig + if err := unmarshal((*plain)(t)); err != nil { + return err + } + + if t.AwsSecretName != "" { + t.DSN = readDSNFromAwsSecretManager(t.AwsSecretName) + } + + // Check required fields + if t.DSN == "" { + return fmt.Errorf("missing data_source_name for target %+v", t) + } + if err := checkCollectorRefs(t.CollectorRefs, "target"); err != nil { + return err + } + + return checkOverflow(t.XXX, "target") +} + +// AWS Secret +type AwsSecret struct { + DSN Secret `json:"data_source_name"` +} + +func readDSNFromAwsSecretManager(secretName string) Secret { + config, err := awsConfig.LoadDefaultConfig(context.TODO(), awsConfig.WithEC2IMDSRegion()) + if err != nil { + slog.Error("unable to load AWS config", "error", err) + os.Exit(1) + } + + // Create Secrets Manager client + svc := secretsmanager.NewFromConfig(config) + + input := &secretsmanager.GetSecretValueInput{ + SecretId: aws.String(secretName), + VersionStage: aws.String("AWSCURRENT"), // VersionStage defaults to AWSCURRENT if unspecified + } + + slog.Debug("reading AWS Secret", "name", secretName) + result, err := svc.GetSecretValue(context.TODO(), input) + if err != nil { + // For a list of exceptions thrown, see + // https://docs.aws.amazon.com/secretsmanager/latest/apireference/API_GetSecretValue.html + slog.Error("unable to read AWS Secret", "error", err) + os.Exit(1) + } + + // Decrypts secret using the associated KMS key. 
+ var secretString string = *result.SecretString + + var awsSecret AwsSecret + jsonErr := json.Unmarshal([]byte(secretString), &awsSecret) + + if jsonErr != nil { + slog.Error("unable to unmarshal AWS Secret") + os.Exit(1) + } + return Secret(awsSecret.DSN) +} diff --git a/config/util.go b/config/util.go new file mode 100644 index 00000000..56f26e10 --- /dev/null +++ b/config/util.go @@ -0,0 +1,70 @@ +package config + +import ( + "fmt" + "log/slog" + "path/filepath" + "strings" +) + +func checkCollectorRefs(collectorRefs []string, ctx string) error { + // At least one collector, no duplicates + if len(collectorRefs) == 0 { + return fmt.Errorf("no collectors defined for %s", ctx) + } + for i, ci := range collectorRefs { + for _, cj := range collectorRefs[i+1:] { + if ci == cj { + return fmt.Errorf("duplicate collector reference %q in %s", ci, ctx) + } + } + } + return nil +} + +func resolveCollectorRefs( + collectorRefs []string, collectors map[string]*CollectorConfig, ctx string, +) ([]*CollectorConfig, error) { + resolved := make([]*CollectorConfig, 0, len(collectorRefs)) + found := make(map[*CollectorConfig]bool) + for _, cref := range collectorRefs { + cref_resolved := false + for k, c := range collectors { + matched, err := filepath.Match(cref, k) + if err != nil { + return nil, fmt.Errorf("bad collector %q referenced in %s: %w", cref, ctx, err) + } + if matched && !found[c] { + resolved = append(resolved, c) + found[c] = true + cref_resolved = true + } + } + if !cref_resolved { + return nil, fmt.Errorf("unknown collector %q referenced in %s", cref, ctx) + } + } + slog.Debug("Resolved collectors", "context", ctx, "count", len(resolved)) + return resolved, nil +} + +func checkLabel(label string, ctx ...string) error { + if label == "" { + return fmt.Errorf("empty label defined in %s", strings.Join(ctx, " ")) + } + if label == "job" || label == TargetLabel { + return fmt.Errorf("reserved label %q redefined in %s", label, strings.Join(ctx, " ")) + } + return nil 
+} + +func checkOverflow(m map[string]any, ctx string) error { + if len(m) > 0 { + var keys []string + for k := range m { + keys = append(keys, k) + } + return fmt.Errorf("unknown fields in %s: %s", ctx, strings.Join(keys, ", ")) + } + return nil +} diff --git a/documentation/sql_exporter.yml b/documentation/sql_exporter.yml index 7b5802ab..4be4f30d 100644 --- a/documentation/sql_exporter.yml +++ b/documentation/sql_exporter.yml @@ -13,6 +13,8 @@ global: # # Must be strictly positive. The default is 500ms. scrape_timeout_offset: 500ms + # Interval between dropping scrape_errors_total metric: by default (0s) metrics are persistent. + scrape_error_drop_interval: 0s # Minimum interval between collector runs: by default (0s) collectors are executed on every scrape. min_interval: 0s # Maximum number of open connections to any one target. Metric queries will run concurrently on multiple connections, @@ -28,13 +30,20 @@ global: # The target to monitor and the collectors to execute on it. target: + # Target name (optional). Setting this field enables extra metrics e.g. `up` and `scrape_duration` with the `target` + # label that are always returned on a scrape. If set, sql_exporter always returns HTTP 200 with these metrics populated + name: mssql_database # Data source name always has a URI schema that matches the driver name. In some cases (e.g. MySQL) # the schema gets dropped or replaced to match the driver expected DSN format. - data_source_name: 'sqlserver://prom_user:prom_password@dbserver1.example.com:1433' + data_source_name: 'sqlserver://prom_user:prom_password@dbserver1.example.com:1433/dbname' # Collectors (referenced by name) to execute on the target. collectors: [mssql_standard] + # In case you need to connect to a backend that only responds to a limited set of commands (e.g. 
pgbouncer) or + # a data warehouse you don't want to keep online all the time (due to the extra cost), you might want to disable `ping` + enable_ping: true + # A collector is a named set of related metrics that are collected together. It can be referenced by name, possibly # along with other collectors. # @@ -52,9 +61,9 @@ collectors: # The result columns conceptually fall into two categories: # * zero or more key columns: their values will be directly mapped to labels of the same name; # * one or more value columns: - # * if exactly one value column, the column name name is ignored and its value becomes the metric value + # * if exactly one value column, the column name is ignored and its value becomes the metric value # * with multiple value columns, a `value_label` must be defined; the column name will populate this label and - # the column value will popilate the metric value. + # the column value will populate the metric value. metrics: # The metric name, type and help text, as exported to /metrics. - metric_name: mssql_log_growths @@ -65,6 +74,13 @@ collectors: key_labels: # Populated from the `db` column of each row. - db + static_labels: + # Arbitrary key/value pair + env: dev + region: europe + # Optional timestamp_value to point at the existing timestamp column to return a metric with an explicit + # timestamp. + # timestamp_value: CreatedAt # This query returns exactly one value per row, in the `counter` column. values: [counter] query: | @@ -83,7 +99,7 @@ collectors: # # Required when multiple value columns are configured. value_label: operation - # Multiple value columns: their name is recorded in the label defined by `attrubute_label` (e.g. + # Multiple value columns: their name is recorded in the label defined by `attribute_label` (e.g. # `operation="io_stall_read_ms"`). values: - io_stall_read @@ -102,6 +118,19 @@ collectors: - io_stall query_ref: io_stall + # Metric with a static value to retrieve string data. 
+ - metric_name: mssql_hostname + type: gauge + help: 'Database server hostname' + key_labels: + # Populated from the `hostname` column of the result. + - hostname + # Static value, always set to `1`. + static_value: 1 + query: | + SELECT @@SERVERNAME AS hostname + + # Named queries, referenced by one or more metrics, through query_ref. queries: # Populates `mssql_io_stall` and `mssql_io_stall_total` diff --git a/drivers_gen.go b/drivers_gen.go index 85af870f..0f7c6e60 100644 --- a/drivers_gen.go +++ b/drivers_gen.go @@ -8,8 +8,8 @@ import ( ) const ( - packageName = "sql_exporter" - filename = "drivers.go" + packageName string = "sql_exporter" + filename string = "drivers.go" ) var driverList = map[string][]string{ @@ -19,10 +19,11 @@ var driverList = map[string][]string{ "github.com/microsoft/go-mssqldb/azuread", }, "extra": { - "github.com/ClickHouse/clickhouse-go", - "github.com/jackc/pgx/v4/stdlib", + "github.com/ClickHouse/clickhouse-go/v2", + "github.com/jackc/pgx/v5/stdlib", "github.com/snowflakedb/gosnowflake", "github.com/vertica/vertica-sql-go", + "github.com/sijms/go-ora/v2", }, "custom": { //"github.com/mithrandie/csvq-driver", diff --git a/errors/errors.go b/errors/errors.go index 2284a7ea..2673a34e 100644 --- a/errors/errors.go +++ b/errors/errors.go @@ -12,23 +12,24 @@ type WithContext interface { error Context() string - RawError() string + RawError() error + Unwrap() error } // withContext implements WithContext. type withContext struct { context string - err string + err error } // New creates a new WithContext. -func New(context, err string) WithContext { - return &withContext{context, err} +func New(context string, err string) WithContext { + return &withContext{context, fmt.Errorf(err)} } // Errorf formats according to a format specifier and returns a new WithContext. 
-func Errorf(context, format string, a ...interface{}) WithContext {
-	return &withContext{context, fmt.Sprintf(format, a...)}
+func Errorf(context, format string, a ...any) WithContext {
+	return &withContext{context, fmt.Errorf(format, a...)}
 }
 
 // Wrap returns a WithContext wrapping err. If err is nil, it returns nil. If err is a WithContext, it is returned
@@ -40,13 +41,13 @@ func Wrap(context string, err error) WithContext {
 	if w, ok := err.(WithContext); ok {
 		return w
 	}
-	return &withContext{context, err.Error()}
+	return &withContext{context, err}
 }
 
 // Wrapf returns a WithContext that prepends a formatted message to err.Error(). If err is nil, it returns nil. If err
 // is a WithContext, the returned WithContext will have the message prepended but the same context as err (presumed to
 // be more specific).
-func Wrapf(context string, err error, format string, a ...interface{}) WithContext {
+func Wrapf(context string, err error, format string, a ...any) WithContext {
 	if err == nil {
 		return nil
 	}
@@ -55,17 +56,17 @@ func Wrapf(context string, err error, format string, a ...interface{}) WithConte
 		prefix = fmt.Sprintf(format, a...)
 	}
 	if w, ok := err.(WithContext); ok {
-		return &withContext{w.Context(), fmt.Errorf("%s: %w", prefix, w.RawError())}
+		return &withContext{w.Context(), fmt.Errorf("%s: %w", prefix, w.RawError())}
 	}
-	return &withContext{context, prefix + ": " + err.Error()}
+	return &withContext{context, fmt.Errorf("%s: %w", prefix, err)}
 }
 
 // Error implements error.
 func (w *withContext) Error() string {
 	if len(w.context) == 0 {
-		return w.err
+		return w.err.Error()
 	}
-	return "[" + w.context + "] " + w.err
+	return "[" + w.context + "] " + w.err.Error()
 }
 
 // Context implements WithContext.
@@ -74,6 +75,11 @@ func (w *withContext) Context() string {
 }
 
 // RawError implements WithContext.
-func (w *withContext) RawError() string {
+func (w *withContext) RawError() error {
 	return w.err
 }
+
+// Unwrap implements WithContext. 
+func (w *withContext) Unwrap() error { + return fmt.Errorf("[%s] %w", w.context, w.err) +} diff --git a/examples/azure-sql-mi/grafana-dashboard/azure-sql-mi.json b/examples/azure-sql-mi/grafana-dashboard/azure-sql-mi.json new file mode 100644 index 00000000..1d4d74c7 --- /dev/null +++ b/examples/azure-sql-mi/grafana-dashboard/azure-sql-mi.json @@ -0,0 +1,2605 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": { + "type": "grafana", + "uid": "-- Grafana --" + }, + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "target": { + "limit": 100, + "matchAny": false, + "tags": [], + "type": "dashboard" + }, + "type": "dashboard" + } + ] + }, + "editable": true, + "fiscalYearStartMonth": 0, + "graphTooltip": 0, + "id": 54, + "links": [], + "liveNow": false, + "panels": [ + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 4, + "panels": [], + "title": "Managed Instance Overview", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "0fyLmRlGk" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 2, + "x": 0, + "y": 1 + }, + "id": 2, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "textMode": "auto" + }, + "pluginVersion": "9.4.7", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "0fyLmRlGk" + }, + "editorMode": "code", + "expr": "mssqlmi_cpu_count{instance=~\"${instance}\"}", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Virtual Cores", + "type": "stat" + }, + { + 
"datasource": { + "type": "prometheus", + "uid": "0fyLmRlGk" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "decbytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 3, + "x": 2, + "y": 1 + }, + "id": 19, + "options": { + "colorMode": "background", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "text": { + "valueSize": 48 + }, + "textMode": "auto" + }, + "pluginVersion": "9.4.7", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "0fyLmRlGk" + }, + "editorMode": "code", + "expr": "mssqlmi_server_memory_bytes{instance=~\"${instance}\"}", + "format": "table", + "interval": "", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Server Memory", + "transformations": [], + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "0fyLmRlGk" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "decbytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 2, + "x": 5, + "y": 1 + }, + "id": 7, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "textMode": "name" + }, + "pluginVersion": "9.4.7", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "0fyLmRlGk" + }, + "editorMode": "code", + "exemplar": false, + "expr": "mssqlmi_total_storage_bytes{instance=~\"${instance}\"}", + "instant": true, + "legendFormat": "__auto", + "range": false, + "refId": "A" 
+ } + ], + "title": "Server Tier", + "transformations": [ + { + "id": "labelsToFields", + "options": { + "keepLabels": [ + "sku" + ], + "mode": "columns", + "valueLabel": "sku" + } + } + ], + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "0fyLmRlGk" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "decbytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 2, + "x": 7, + "y": 1 + }, + "id": 8, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "textMode": "name" + }, + "pluginVersion": "9.4.7", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "0fyLmRlGk" + }, + "editorMode": "code", + "exemplar": false, + "expr": "mssqlmi_total_storage_bytes{instance=~\"${instance}\"}", + "instant": true, + "legendFormat": "__auto", + "range": false, + "refId": "A" + } + ], + "title": "Service Level Objective", + "transformations": [ + { + "id": "labelsToFields", + "options": { + "keepLabels": [ + "hardware_type" + ], + "mode": "columns", + "valueLabel": "hardware_type" + } + } + ], + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "0fyLmRlGk" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "decbytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 3, + "x": 9, + "y": 1 + }, + "id": 10, + "options": { + "colorMode": "background", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + 
"fields": "", + "values": false + }, + "text": { + "valueSize": 48 + }, + "textMode": "auto" + }, + "pluginVersion": "9.4.7", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "0fyLmRlGk" + }, + "editorMode": "code", + "expr": "mssqlmi_total_storage_bytes{instance=~\"${instance}\"}", + "format": "table", + "interval": "", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Total Storage", + "transformations": [], + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "0fyLmRlGk" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "red", + "value": null + }, + { + "color": "#EAB839", + "value": 150000000 + }, + { + "color": "green", + "value": 200000000 + } + ] + }, + "unit": "decbytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 3, + "x": 12, + "y": 1 + }, + "id": 18, + "options": { + "colorMode": "background", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "text": { + "valueSize": 48 + }, + "textMode": "auto" + }, + "pluginVersion": "9.4.7", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "0fyLmRlGk" + }, + "editorMode": "code", + "expr": "mssqlmi_available_storage_bytes{instance=~\"${instance}\"}", + "format": "table", + "interval": "", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Available Storage", + "transformations": [], + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "0fyLmRlGk" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "none" + }, + 
"overrides": [] + }, + "gridPos": { + "h": 3, + "w": 3, + "x": 15, + "y": 1 + }, + "id": 17, + "options": { + "colorMode": "background", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "text": { + "valueSize": 48 + }, + "textMode": "auto" + }, + "pluginVersion": "9.4.7", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "0fyLmRlGk" + }, + "editorMode": "code", + "expr": "mssqlmi_db_online{instance=~\"${instance}\"}", + "format": "table", + "interval": "", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "# Databases", + "transformations": [], + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "0fyLmRlGk" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "max": 1, + "min": 0, + "thresholds": { + "mode": "percentage", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "#EAB839", + "value": 70 + }, + { + "color": "red", + "value": 90 + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 7, + "x": 0, + "y": 4 + }, + "id": 52, + "options": { + "displayMode": "lcd", + "minVizHeight": 10, + "minVizWidth": 0, + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "/^Value$/", + "values": false + }, + "showUnfilled": true + }, + "pluginVersion": "9.4.7", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "0fyLmRlGk" + }, + "editorMode": "code", + "exemplar": false, + "expr": "(mssqlmi_total_storage_bytes{instance=~\"${instance}\"} - ignoring(instance,hardware_type,sku) mssqlmi_available_storage_bytes{instance=~\"${instance}\"}) / ignoring(instance,hardware_type,sku) mssqlmi_total_storage_bytes{instance=~\"${instance}\"}", + "format": "table", + "instant": true, + "interval": "", + 
"legendFormat": "__auto", + "range": false, + "refId": "A" + } + ], + "title": "Storage Usage %", + "transformations": [], + "type": "bargauge" + }, + { + "datasource": { + "type": "prometheus", + "uid": "0fyLmRlGk" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "decbytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 7 + }, + "id": 54, + "options": { + "legend": { + "calcs": [ + "lastNotNull" + ], + "displayMode": "table", + "placement": "right", + "showLegend": true, + "sortBy": "Last *", + "sortDesc": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "0fyLmRlGk" + }, + "editorMode": "code", + "expr": "mssqlmi_database_size_bytes{instance=~\"${instance}\"}", + "interval": "", + "legendFormat": "{{file_type}} {{database}}", + "range": true, + "refId": "A" + } + ], + "title": "Database Sizes", + "type": "timeseries" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 15 + }, + "id": 13, + "panels": [], + "title": "SQL Activity", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "0fyLmRlGk" + }, + "fieldConfig": { + "defaults": { + 
"color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 16 + }, + "id": 15, + "options": { + "legend": { + "calcs": [ + "lastNotNull", + "mean" + ], + "displayMode": "table", + "placement": "right", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "0fyLmRlGk" + }, + "editorMode": "code", + "expr": "rate(mssqlmi_perf_counter{counter=\"Batch Requests/sec\",instance=~\"${instance}\"}[$__interval])", + "interval": "", + "legendFormat": "{{counter}}", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "0fyLmRlGk" + }, + "editorMode": "code", + "expr": "rate(mssqlmi_perf_counter{counter=\"SQL Compilations/sec\",instance=~\"${instance}\"}[$__interval])", + "hide": false, + "interval": "", + "legendFormat": "{{counter}}", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "0fyLmRlGk" + }, + "editorMode": "code", + "expr": "rate(mssqlmi_perf_counter{counter=\"SQL Re-Compilations/sec\",instance=~\"${instance}\"}[$__interval])", + "hide": false, + "interval": "", + "legendFormat": "{{counter}}", + "range": true, + "refId": 
"C" + } + ], + "title": "Batch Requests/sec", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "0fyLmRlGk" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 16 + }, + "id": 16, + "options": { + "legend": { + "calcs": [ + "lastNotNull", + "mean" + ], + "displayMode": "table", + "placement": "right", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "0fyLmRlGk" + }, + "editorMode": "code", + "expr": "rate(mssqlmi_perf_counter{counter=\"Transactions/sec\",instance=~\"${instance}\"}[$__interval])", + "interval": "", + "legendFormat": "{{db}}", + "range": true, + "refId": "A" + } + ], + "title": "Transactions/sec", + "type": "timeseries" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 24 + }, + "id": 21, + "panels": [], + "title": "CPU and Queuing", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "0fyLmRlGk" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": 
"text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "percent" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 25 + }, + "id": 23, + "options": { + "legend": { + "calcs": [ + "lastNotNull", + "mean" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "0fyLmRlGk" + }, + "editorMode": "code", + "expr": "sum(mssqlmi_perf_gauge{gauge=\"CPU usage %\", object=~\".*Resource Pool Stats\", instance=~\"${instance}\"})", + "interval": "", + "legendFormat": "CPU usage %", + "range": true, + "refId": "A" + } + ], + "title": "Total %CPU Usage", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "0fyLmRlGk" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + 
"mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "percent" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 25 + }, + "id": 24, + "options": { + "legend": { + "calcs": [ + "lastNotNull", + "mean" + ], + "displayMode": "table", + "placement": "right", + "showLegend": true, + "sortBy": "Mean", + "sortDesc": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "0fyLmRlGk" + }, + "editorMode": "code", + "expr": "mssqlmi_perf_gauge{gauge=\"CPU usage %\", object=~\".*Workload Group Stats\", instance=~\"${instance}\"}", + "interval": "", + "legendFormat": "{{db}}", + "range": true, + "refId": "A" + } + ], + "title": "Workload Group - %CPU Usage", + "type": "timeseries" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 33 + }, + "id": 26, + "panels": [], + "title": "Waits and Queues", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "0fyLmRlGk" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + 
] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 34 + }, + "id": 28, + "options": { + "legend": { + "calcs": [ + "lastNotNull", + "mean" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true, + "sortBy": "Mean", + "sortDesc": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "0fyLmRlGk" + }, + "editorMode": "code", + "expr": "rate(mssqlmi_wait_time_seconds{instance=~\"${instance}\"}[$__rate_interval])", + "legendFormat": "{{wait_type}}", + "range": true, + "refId": "A" + } + ], + "title": "Wait Times by Type", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "0fyLmRlGk" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 34 + }, + "id": 29, + "options": { + "legend": { + "calcs": [ + "lastNotNull", + "mean" + ], + "displayMode": "table", + "placement": "right", + "showLegend": true, + "sortBy": "Mean", + "sortDesc": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": 
"0fyLmRlGk" + }, + "editorMode": "code", + "expr": "sum by(wait_category) (rate(mssqlmi_wait_time_seconds{instance=~\"${instance}\"}[$__rate_interval]))", + "interval": "", + "legendFormat": "{{wait_category}}", + "range": true, + "refId": "A" + } + ], + "title": "Wait Times by Category", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "0fyLmRlGk" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 42 + }, + "id": 30, + "options": { + "legend": { + "calcs": [ + "lastNotNull", + "mean" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true, + "sortBy": "Mean", + "sortDesc": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "0fyLmRlGk" + }, + "editorMode": "code", + "expr": "rate(mssqlmi_signal_wait_time_seconds{instance=~\"${instance}\"}[$__rate_interval])", + "legendFormat": "{{wait_type}}", + "range": true, + "refId": "A" + } + ], + "title": "Signal Wait Times by Type", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "0fyLmRlGk" + }, + "fieldConfig": { + "defaults": { + 
"color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 42 + }, + "id": 31, + "options": { + "legend": { + "calcs": [ + "lastNotNull", + "mean" + ], + "displayMode": "table", + "placement": "right", + "showLegend": true, + "sortBy": "Mean", + "sortDesc": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "0fyLmRlGk" + }, + "editorMode": "code", + "expr": "sum by(wait_category) (rate(mssqlmi_signal_wait_time_seconds{instance=~\"${instance}\"}[$__rate_interval]))", + "interval": "", + "legendFormat": "{{wait_category}}", + "range": true, + "refId": "A" + } + ], + "title": "Signal Wait Times by Category", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "0fyLmRlGk" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + 
"lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 50 + }, + "id": 32, + "options": { + "legend": { + "calcs": [ + "lastNotNull", + "mean" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true, + "sortBy": "Mean", + "sortDesc": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "0fyLmRlGk" + }, + "editorMode": "code", + "expr": "rate(mssqlmi_waiting_tasks_count{instance=~\"${instance}\"}[$__rate_interval])", + "legendFormat": "{{wait_type}}", + "range": true, + "refId": "A" + } + ], + "title": "Waiting Tasks by Type", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "0fyLmRlGk" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { 
+ "h": 8, + "w": 12, + "x": 12, + "y": 50 + }, + "id": 33, + "options": { + "legend": { + "calcs": [ + "lastNotNull", + "mean" + ], + "displayMode": "table", + "placement": "right", + "showLegend": true, + "sortBy": "Mean", + "sortDesc": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "0fyLmRlGk" + }, + "editorMode": "code", + "expr": "sum by(wait_category) (rate(mssqlmi_waiting_tasks_count{instance=~\"${instance}\"}[$__rate_interval]))", + "interval": "", + "legendFormat": "{{wait_category}}", + "range": true, + "refId": "A" + } + ], + "title": "Waiting Tasks by Category", + "type": "timeseries" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 58 + }, + "id": 35, + "panels": [], + "title": "Log Activity", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "0fyLmRlGk" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "Bps" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 59 + }, + "id": 37, + "options": { + "legend": { + "calcs": [ + "lastNotNull", + "mean" + ], + "displayMode": "table", + "placement": "right", + "showLegend": true, + "sortBy": "Mean", + 
"sortDesc": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "0fyLmRlGk" + }, + "editorMode": "code", + "expr": "rate(mssqlmi_perf_counter{counter=\"Log Bytes Flushed/sec\", instance=~\"${instance}\"}[$__rate_interval])", + "interval": "", + "legendFormat": "{{db}}", + "range": true, + "refId": "A" + } + ], + "title": "Log Bytes Flushed/sec", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "0fyLmRlGk" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 59 + }, + "id": 38, + "options": { + "legend": { + "calcs": [ + "lastNotNull", + "mean" + ], + "displayMode": "table", + "placement": "right", + "showLegend": true, + "sortBy": "Mean", + "sortDesc": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "0fyLmRlGk" + }, + "editorMode": "code", + "expr": "rate(mssqlmi_perf_counter{counter=\"Log Flushes/sec\", instance=~\"${instance}\"}[$__rate_interval])", + "interval": "", + "legendFormat": "{{db}}", + "range": true, + "refId": "A" + } + ], + 
"title": "Log Flushes/sec", + "type": "timeseries" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 67 + }, + "id": 40, + "panels": [], + "title": "Memory", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "0fyLmRlGk" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "deckbytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 68 + }, + "id": 42, + "options": { + "legend": { + "calcs": [ + "lastNotNull", + "mean" + ], + "displayMode": "table", + "placement": "right", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "0fyLmRlGk" + }, + "editorMode": "code", + "expr": "mssqlmi_perf_gauge{object=~\".*Memory Manager\", instance=~\"${instance}\"}", + "interval": "", + "legendFormat": "{{gauge}}", + "range": true, + "refId": "A" + } + ], + "title": "Memory Manager", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "0fyLmRlGk" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + 
"axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "deckbytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 68 + }, + "id": 44, + "options": { + "legend": { + "calcs": [ + "lastNotNull", + "mean" + ], + "displayMode": "table", + "placement": "right", + "showLegend": true, + "sortBy": "Mean", + "sortDesc": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "0fyLmRlGk" + }, + "editorMode": "code", + "expr": "mssqlmi_clerk_size_kilobytes{instance=~\"${instance}\"}", + "legendFormat": "{{clerk_type}}", + "range": true, + "refId": "A" + } + ], + "title": "Memory Clerks", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "0fyLmRlGk" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + 
"mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 76 + }, + "id": 46, + "options": { + "legend": { + "calcs": [ + "lastNotNull", + "mean" + ], + "displayMode": "table", + "placement": "right", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "0fyLmRlGk" + }, + "editorMode": "code", + "expr": "mssqlmi_perf_gauge{gauge=\"Page life expectancy\", instance=~\"${instance}\"}", + "legendFormat": "{{object}}", + "range": true, + "refId": "A" + } + ], + "title": "Page Life Expectancy", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "0fyLmRlGk" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 76 + }, + "id": 47, + "options": { + "legend": { + "calcs": [ + "lastNotNull", + "mean" + ], + "displayMode": "table", + "placement": "right", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + 
"datasource": { + "type": "prometheus", + "uid": "0fyLmRlGk" + }, + "editorMode": "code", + "expr": "rate(mssqlmi_perf_counter{object=~\".*Buffer Manager\", instance=~\"${instance}\"}[$__rate_interval])", + "interval": "", + "legendFormat": "{{counter}}", + "range": true, + "refId": "A" + } + ], + "title": "Buffer Manager", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "0fyLmRlGk" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 84 + }, + "id": 48, + "options": { + "legend": { + "calcs": [ + "lastNotNull", + "mean" + ], + "displayMode": "table", + "placement": "right", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "0fyLmRlGk" + }, + "editorMode": "code", + "expr": "rate(mssqlmi_perf_counter{object=~\".*Access Methods\", instance=~\"${instance}\"}[$__rate_interval])", + "interval": "", + "legendFormat": "{{counter}}", + "range": true, + "refId": "A" + } + ], + "title": "Access Methods", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "0fyLmRlGk" + }, + 
"description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 84 + }, + "id": 49, + "options": { + "legend": { + "calcs": [ + "lastNotNull", + "mean" + ], + "displayMode": "table", + "placement": "right", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "0fyLmRlGk" + }, + "editorMode": "code", + "expr": "rate(mssqlmi_perf_counter{object=~\".*General Statistics\", instance=~\"${instance}\"}[$__rate_interval])", + "interval": "", + "legendFormat": "{{counter}}", + "range": true, + "refId": "A" + } + ], + "title": "General Statistics", + "type": "timeseries" + } + ], + "refresh": "1m", + "revision": 1, + "schemaVersion": 38, + "style": "dark", + "tags": [ + "SQLMI" + ], + "templating": { + "list": [ + { + "current": { + "selected": true, + "text": "some-instance-of-az-sql-mi.database.windows.net:1433", + "value": "some-instance-of-az-sql-mi.database.windows.net:1433" + }, + "definition": "mssqlmi_cpu_count", + "hide": 0, + "includeAll": false, + "label": "Database", + "multi": false, + "name": "instance", + "options": [], + "query": { + "query": 
"mssqlmi_cpu_count", + "refId": "StandardVariableQuery" + }, + "refresh": 1, + "regex": "/.*instance=\"([^\"]*).*/", + "skipUrlSync": false, + "sort": 0, + "type": "query" + } + ] + }, + "time": { + "from": "now-6h", + "to": "now" + }, + "timepicker": {}, + "timezone": "", + "title": "Azure SQL MI", + "uid": "nF3YtP1Vz", + "version": 29, + "weekStart": "" +} diff --git a/examples/azure-sql-mi/grafana-dashboard/cpu-and-queuing.png b/examples/azure-sql-mi/grafana-dashboard/cpu-and-queuing.png new file mode 100644 index 00000000..4a8ef587 Binary files /dev/null and b/examples/azure-sql-mi/grafana-dashboard/cpu-and-queuing.png differ diff --git a/examples/azure-sql-mi/grafana-dashboard/log-activity.png b/examples/azure-sql-mi/grafana-dashboard/log-activity.png new file mode 100644 index 00000000..8fbb4086 Binary files /dev/null and b/examples/azure-sql-mi/grafana-dashboard/log-activity.png differ diff --git a/examples/azure-sql-mi/grafana-dashboard/memory.png b/examples/azure-sql-mi/grafana-dashboard/memory.png new file mode 100644 index 00000000..f0d493c7 Binary files /dev/null and b/examples/azure-sql-mi/grafana-dashboard/memory.png differ diff --git a/examples/azure-sql-mi/grafana-dashboard/overview.png b/examples/azure-sql-mi/grafana-dashboard/overview.png new file mode 100644 index 00000000..20d90b9a Binary files /dev/null and b/examples/azure-sql-mi/grafana-dashboard/overview.png differ diff --git a/examples/azure-sql-mi/grafana-dashboard/sql-activity.png b/examples/azure-sql-mi/grafana-dashboard/sql-activity.png new file mode 100644 index 00000000..8241e1b7 Binary files /dev/null and b/examples/azure-sql-mi/grafana-dashboard/sql-activity.png differ diff --git a/examples/azure-sql-mi/grafana-dashboard/waits-and-queues.png b/examples/azure-sql-mi/grafana-dashboard/waits-and-queues.png new file mode 100644 index 00000000..37b9805e Binary files /dev/null and b/examples/azure-sql-mi/grafana-dashboard/waits-and-queues.png differ diff --git 
a/examples/azure-sql-mi/mssql_mi_clerk.collector.yml b/examples/azure-sql-mi/mssql_mi_clerk.collector.yml new file mode 100644 index 00000000..2b169407 --- /dev/null +++ b/examples/azure-sql-mi/mssql_mi_clerk.collector.yml @@ -0,0 +1,36 @@ +# A collector defining memory clerk metrics for Microsoft SQL Server (Managed Instance). +# +# It is required that the SQL Server user has the following permissions: +# +# GRANT VIEW ANY DEFINITION TO +# GRANT VIEW SERVER STATE TO +# +collector_name: mssqlmi_clerk + +# Similar to global.min_interval, but applies to the queries defined by this collector only. +#min_interval: 0s + +metrics: + # + # Collected from sys.dm_os_memory_clerks + # + - metric_name: mssqlmi_clerk_size_kilobytes + type: gauge + help: 'Memory Clerk' + key_labels: + - clerk_type + values: [size_kb] + query_ref: mssqlmi_clerk + +queries: + - query_name: mssqlmi_clerk + query: | + SELECT + mc.[type] AS [clerk_type] + ,SUM(mc.[pages_kb]) AS [size_kb] + FROM sys.[dm_os_memory_clerks] AS mc WITH (NOLOCK) + GROUP BY + mc.[type] + HAVING + SUM(mc.[pages_kb]) >= 1024 + OPTION(RECOMPILE); diff --git a/examples/azure-sql-mi/mssql_mi_perf.collector.yml b/examples/azure-sql-mi/mssql_mi_perf.collector.yml new file mode 100644 index 00000000..20e64a79 --- /dev/null +++ b/examples/azure-sql-mi/mssql_mi_perf.collector.yml @@ -0,0 +1,226 @@ +# A collector defining performance metrics for Microsoft SQL Server (Managed Instance). +# +# It is required that the SQL Server user has the following permissions: +# +# GRANT VIEW ANY DEFINITION TO +# GRANT VIEW SERVER STATE TO +# +collector_name: mssqlmi_perf + +# Similar to global.min_interval, but applies to the queries defined by this collector only. 
+#min_interval: 0s + +metrics: + # + # Collected from sys.dm_os_performance_counters + # + - metric_name: mssqlmi_perf_counter + type: counter + help: 'Performance counters' + key_labels: + - db + - object + - counter + values: [counter_value] + query_ref: mssqlmi_performance_counters + + - metric_name: mssqlmi_perf_gauge + type: gauge + help: 'Performance gauges' + key_labels: + - db + - object + - gauge + values: [counter_value] + query_ref: mssqlmi_performance_gauges + +queries: + - query_name: mssqlmi_performance_counters + query: | + SET DEADLOCK_PRIORITY -10; + IF SERVERPROPERTY('EngineEdition') <> 8 BEGIN /*not Azure Managed Instance*/ + DECLARE @ErrorMessage AS nvarchar(500) = 'sql_exporter - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure Managed Instance.'; + RAISERROR (@ErrorMessage,11,1) + RETURN + END + DECLARE @PCounters TABLE + ( + [object_name] nvarchar(128), + [counter_name] nvarchar(128), + [instance_name] nvarchar(128), + [cntr_value] bigint, + [cntr_type] INT , + Primary Key([object_name],[counter_name],[instance_name]) + ); + WITH PerfCounters AS ( + SELECT DISTINCT + RTrim(spi.[object_name]) [object_name] + ,RTrim(spi.[counter_name]) [counter_name] + ,CASE WHEN ( + RTRIM(spi.[object_name]) LIKE '%:Databases' + OR RTRIM(spi.[object_name]) LIKE '%:Database Replica' + OR RTRIM(spi.[object_name]) LIKE '%:Catalog Metadata' + OR RTRIM(spi.[object_name]) LIKE '%:Query Store' + OR RTRIM(spi.[object_name]) LIKE '%:Columnstore' + OR RTRIM(spi.[object_name]) LIKE '%:Advanced Analytics') + AND TRY_CONVERT([uniqueidentifier], spi.[instance_name]) IS NOT NULL -- for cloud only + THEN ISNULL(d.[name],RTRIM(spi.instance_name)) -- Elastic Pools counters exist for all databases but sys.databases only has current DB value + WHEN + RTRIM([object_name]) LIKE '%:Availability Replica' + AND TRY_CONVERT([uniqueidentifier], spi.[instance_name]) IS NOT NULL -- for cloud only + THEN ISNULL(d.[name],RTRIM(spi.[instance_name])) + 
RTRIM(SUBSTRING(spi.[instance_name], 37, LEN(spi.[instance_name]))) + ELSE RTRIM(spi.instance_name) + END AS [instance_name] + ,CAST(spi.[cntr_value] AS BIGINT) AS [cntr_value] + ,spi.[cntr_type] + FROM sys.dm_os_performance_counters AS spi + LEFT JOIN sys.databases AS d + ON LEFT(spi.[instance_name], 36) -- some instance_name values have an additional identifier appended after the GUID + = CASE + /*in SQL DB standalone, physical_database_name for master is the GUID of the user database*/ + WHEN d.[name] = 'master' AND TRY_CONVERT([uniqueidentifier], d.[physical_database_name]) IS NOT NULL + THEN d.[name] + ELSE d.[physical_database_name] + END + WHERE + counter_name IN ( + -- following are all counters + -- from Databases object + 'Transactions/sec' + ,'Log Bytes Flushed/sec' + ,'Log Flushes/sec' + -- from SQL Statistics object + ,'Batch Requests/sec' + ,'SQL Compilations/sec' + ,'SQL Re-Compilations/sec' + -- from Access Methods object + ,'Forwarded Records/sec' + ,'Full Scans/sec' + ,'Index Searches/sec' + ,'Page Splits/sec' + ,'Table Lock Escalations/sec' + ,'Workfiles Created/sec' + ,'Worktables Created/sec' + -- from General Statistics + ,'Active Temp Tables' + ,'Logins/sec' + ,'Logouts/sec' + ,'Logical Connections' + ,'Processes blocked' + ,'Temp Tables Creation Rate' + ,'Temp Tables For Destruction' + ,'User Connections' + -- from Buffer Manager + ,'Background writer pages/sec' + ,'Free list stalls/sec' + ,'Lazy writes/sec' + ,'Page lookups/sec' + ,'Page reads/sec' + ,'Page writes/sec' + ,'Readahead pages/sec' + ) + ) + INSERT INTO @PCounters select * from PerfCounters + SELECT + pc.[object_name] AS [object] + ,pc.[counter_name] AS [counter] + ,CASE pc.[instance_name] + WHEN '_Total' THEN 'Total' + ELSE ISNULL(pc.[instance_name],'') + END AS [db] + ,pc.[cntr_value] AS [counter_value] + from @PCounters pc + OPTION (RECOMPILE); + + - query_name: mssqlmi_performance_gauges + query: | + SET DEADLOCK_PRIORITY -10; + IF SERVERPROPERTY('EngineEdition') <> 8 BEGIN 
/*not Azure Managed Instance*/ + DECLARE @ErrorMessage AS nvarchar(500) = 'sql_exporter - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure Managed Instance.'; + RAISERROR (@ErrorMessage,11,1) + RETURN + END + DECLARE @PCounters TABLE + ( + [object_name] nvarchar(128), + [counter_name] nvarchar(128), + [instance_name] nvarchar(128), + [cntr_value] bigint, + [cntr_type] INT , + Primary Key([object_name],[counter_name],[instance_name]) + ); + WITH PerfCounters AS ( + SELECT DISTINCT + RTrim(spi.[object_name]) [object_name] + ,RTrim(spi.[counter_name]) [counter_name] + ,CASE WHEN ( + RTRIM(spi.[object_name]) LIKE '%:Databases' + OR RTRIM(spi.[object_name]) LIKE '%:Database Replica' + OR RTRIM(spi.[object_name]) LIKE '%:Buffer Manager' + OR RTRIM(spi.[object_name]) LIKE '%:Buffer Node' + OR RTRIM(spi.[object_name]) LIKE '%:Catalog Metadata' + OR RTRIM(spi.[object_name]) LIKE '%:Query Store' + OR RTRIM(spi.[object_name]) LIKE '%:Columnstore' + OR RTRIM(spi.[object_name]) LIKE '%:Advanced Analytics') + AND TRY_CONVERT([uniqueidentifier], spi.[instance_name]) IS NOT NULL -- for cloud only + THEN ISNULL(d.[name],RTRIM(spi.instance_name)) -- Elastic Pools counters exist for all databases but sys.databases only has current DB value + WHEN + RTRIM([object_name]) LIKE '%:Availability Replica' + AND TRY_CONVERT([uniqueidentifier], spi.[instance_name]) IS NOT NULL -- for cloud only + THEN ISNULL(d.[name],RTRIM(spi.[instance_name])) + RTRIM(SUBSTRING(spi.[instance_name], 37, LEN(spi.[instance_name]))) + ELSE RTRIM(spi.instance_name) + END AS [instance_name] + ,CAST(spi.[cntr_value] AS BIGINT) AS [cntr_value] + ,spi.[cntr_type] + FROM sys.dm_os_performance_counters AS spi + LEFT JOIN sys.databases AS d + ON LEFT(spi.[instance_name], 36) -- some instance_name values have an additional identifier appended after the GUID + = CASE + /*in SQL DB standalone, physical_database_name for master is the GUID of the user database*/ + WHEN d.[name] = 
'master' AND TRY_CONVERT([uniqueidentifier], d.[physical_database_name]) IS NOT NULL + THEN d.[name] + ELSE d.[physical_database_name] + END + WHERE + counter_name IN ( + -- following are all gauges + -- from Resource Pool Stats/Workload Group Stats objects + 'CPU usage %' + ,'CPU usage % base' + -- from Memory Manager object + --,'Memory Grants Outstanding' + --,'Memory Grants Pending' + ,'Target Server Memory (KB)' + ,'Total Server Memory (KB)' + -- from General Statistics + --,'Active Temp Tables' + --,'Logical Connections' + --,'Processes blocked' + --,'Temp Tables Creation Rate' + --,'Temp Tables For Destruction' + --,'User Connections' + -- from Buffer Manager + ,'Page life expectancy' + ) + ) + INSERT INTO @PCounters select * from PerfCounters + SELECT + pc.[object_name] AS [object] + ,pc.[counter_name] AS [gauge] + ,CASE pc.[instance_name] + WHEN '_Total' THEN 'Total' + ELSE ISNULL(pc.[instance_name],'') + END AS [db] + ,CAST(CASE WHEN pc.[cntr_type] = 537003264 AND pc1.[cntr_value] > 0 THEN (pc.[cntr_value] * 1.0) / (pc1.[cntr_value] * 1.0) * 100 ELSE pc.[cntr_value] END AS float(10)) AS [counter_value] + from @PCounters pc + LEFT OUTER JOIN @PCounters AS pc1 + ON ( + pc.[counter_name] = REPLACE(pc1.[counter_name],' base','') + OR pc.[counter_name] = REPLACE(pc1.[counter_name],' base',' (ms)') + ) + AND pc.[object_name] = pc1.[object_name] + AND pc.[instance_name] = pc1.[instance_name] + AND pc1.[counter_name] LIKE '%base' + WHERE + pc.[counter_name] NOT LIKE '% base' + OPTION (RECOMPILE); diff --git a/examples/azure-sql-mi/mssql_mi_properties.collector.yml b/examples/azure-sql-mi/mssql_mi_properties.collector.yml new file mode 100644 index 00000000..16a75b8d --- /dev/null +++ b/examples/azure-sql-mi/mssql_mi_properties.collector.yml @@ -0,0 +1,74 @@ +# A collector defining standard metrics for Microsoft SQL Server (Managed Instance). 
+# +# It is required that the SQL Server user has the following permissions: +# +# GRANT VIEW ANY DEFINITION TO +# GRANT VIEW SERVER STATE TO +# +collector_name: mssqlmi_properties + +# Similar to global.min_interval, but applies to the queries defined by this collector only. +#min_interval: 0s + +metrics: + # + # Collected from sys.server_resource_stats + # + - metric_name: mssqlmi_cpu_count + type: gauge + help: 'Virtual Cores' + values: [cpu_count] + query_ref: mssqlmi_properties + + - metric_name: mssqlmi_server_memory_bytes + type: gauge + help: 'Server Memory in bytes' + values: [server_memory] + query_ref: mssqlmi_properties + + - metric_name: mssqlmi_total_storage_bytes + type: gauge + help: 'Total Storage in bytes' + key_labels: + # populated from sku column + - sku + - hardware_type + values: [total_storage] + query_ref: mssqlmi_properties + + - metric_name: mssqlmi_available_storage_bytes + type: gauge + help: 'Available Storage in bytes' + values: [available_storage] + query_ref: mssqlmi_properties + + - metric_name: mssqlmi_db_online + type: gauge + help: '# of Online Databases' + values: [db_online] + query_ref: mssqlmi_properties + +queries: + - query_name: mssqlmi_properties + query: | + IF SERVERPROPERTY('EngineEdition') <> 8 BEGIN /*not Azure Managed Instance*/ + DECLARE @ErrorMessage AS nvarchar(500) = 'sql_exporter - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure Managed Instance.'; + RAISERROR (@ErrorMessage,11,1) + RETURN + END + SELECT TOP 1 + [virtual_core_count] AS [cpu_count] + ,(SELECT [process_memory_limit_mb]*1000000 FROM sys.dm_os_job_object) AS [server_memory] + ,[sku] + ,[hardware_generation] AS [hardware_type] + ,cast([reserved_storage_mb]*1000000 as bigint) AS [total_storage] + ,cast(([reserved_storage_mb] - [storage_space_used_mb])*1000000 as bigint) AS [available_storage] + ,[db_online] + FROM sys.server_resource_stats + CROSS APPLY ( + SELECT + SUM( CASE WHEN [state] = 0 THEN 1 ELSE 0 END 
) AS [db_online] + FROM sys.databases + ) AS dbs + ORDER BY + [start_time] DESC; diff --git a/examples/azure-sql-mi/mssql_mi_size.collector.yml b/examples/azure-sql-mi/mssql_mi_size.collector.yml new file mode 100644 index 00000000..31061d72 --- /dev/null +++ b/examples/azure-sql-mi/mssql_mi_size.collector.yml @@ -0,0 +1,50 @@ +# A collector defining database size metrics for Microsoft SQL Server (Managed Instance). +# +# It is required that the SQL Server user has the following permissions: +# +# GRANT VIEW ANY DEFINITION TO +# GRANT VIEW SERVER STATE TO +# +collector_name: mssqlmi_size + +# Similar to global.min_interval, but applies to the queries defined by this collector only. +#min_interval: 0s + +metrics: + # + # Collected from sys.dm_io_virtual_file_stats + # + - metric_name: mssqlmi_database_size_bytes + type: gauge + help: 'Database Size in Bytes' + key_labels: + - database + - file_type + values: [size_on_disk_bytes] + query_ref: mssqlmi_size + +queries: + - query_name: mssqlmi_size + query: | + IF SERVERPROPERTY('EngineEdition') <> 8 BEGIN /*not Azure Managed Instance*/ + DECLARE @ErrorMessage AS nvarchar(500) = 'sql_exporter - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure Managed Instance.'; + RAISERROR (@ErrorMessage,11,1) + RETURN + END + SELECT + DB_NAME(mf.database_id) AS [database] + -- ,name AS 'File Logical Name' + ,'file_type' = CASE WHEN type_desc = 'LOG' THEN 'Log File' WHEN type_desc = 'ROWS' THEN 'Data File' ELSE type_desc END + -- ,mf.physical_name AS 'File Physical Name' + ,size_on_disk_bytes + -- ,size_on_disk_bytes/ 1024 AS 'Size(KB)' + -- size_on_disk_bytes/ 1024 / 1024 AS 'Size(MB)', + -- size_on_disk_bytes/ 1024 / 1024 / 1024 AS 'Size(GB)' + FROM + sys.dm_io_virtual_file_stats(NULL, NULL) AS divfs + JOIN sys.master_files AS mf + ON mf.database_id = divfs.database_id + AND mf.file_id = divfs.file_id + WHERE DB_NAME(mf.database_id) NOT IN ('master', 'model','tempdb', 'msdb') + AND name in 
('data_0', 'log') + -- ORDER BY size_on_disk_bytes DESC diff --git a/examples/azure-sql-mi/mssql_mi_wait.collector.yml b/examples/azure-sql-mi/mssql_mi_wait.collector.yml new file mode 100644 index 00000000..809f693a --- /dev/null +++ b/examples/azure-sql-mi/mssql_mi_wait.collector.yml @@ -0,0 +1,153 @@ +# A collector defining wait metrics for Microsoft SQL Server (Managed Instance). +# +# It is required that the SQL Server user has the following permissions: +# +# GRANT VIEW ANY DEFINITION TO +# GRANT VIEW SERVER STATE TO +# +collector_name: mssqlmi_wait + +# Similar to global.min_interval, but applies to the queries defined by this collector only. +#min_interval: 0s + +metrics: + # + # Collected from sys.dm_os_wait_stats + # + - metric_name: mssqlmi_wait_time_seconds + type: gauge + help: 'Wait Time in Seconds' + key_labels: + - wait_type + - wait_category + values: [wait_time_seconds] + query_ref: mssqlmi_wait + - metric_name: mssqlmi_signal_wait_time_seconds + type: gauge + help: 'Signal Wait Time in Seconds' + key_labels: + - wait_type + - wait_category + values: [signal_wait_time_seconds] + query_ref: mssqlmi_wait + - metric_name: mssqlmi_waiting_tasks_count + type: gauge + help: 'Wait Tasks Count' + key_labels: + - wait_type + - wait_category + values: [waiting_tasks_count] + query_ref: mssqlmi_wait + +queries: + - query_name: mssqlmi_wait + query: | + IF SERVERPROPERTY('EngineEdition') <> 8 BEGIN /*not Azure Managed Instance*/ + DECLARE @ErrorMessage AS nvarchar(500) = 'sql_exporter - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure Managed Instance.'; + RAISERROR (@ErrorMessage,11,1) + RETURN + END + SELECT + ws.[wait_type] + ,CAST([wait_time_ms] / 1000.0 AS FLOAT(10)) AS [wait_time_seconds] + --,[wait_time_ms] - [signal_wait_time_ms] AS [resource_wait_ms] + ,CAST([signal_wait_time_ms] / 1000.0 AS FLOAT(10)) AS [signal_wait_time_seconds] + -- ,[max_wait_time_ms] + ,[waiting_tasks_count] + ,CASE + WHEN ws.[wait_type] 
LIKE 'SOS_SCHEDULER_YIELD' then 'CPU' + WHEN ws.[wait_type] = 'THREADPOOL' THEN 'Worker Thread' + WHEN ws.[wait_type] LIKE 'LCK[_]%' THEN 'Lock' + WHEN ws.[wait_type] LIKE 'LATCH[_]%' THEN 'Latch' + WHEN ws.[wait_type] LIKE 'PAGELATCH[_]%' THEN 'Buffer Latch' + WHEN ws.[wait_type] LIKE 'PAGEIOLATCH[_]%' THEN 'Buffer IO' + WHEN ws.[wait_type] LIKE 'RESOURCE_SEMAPHORE_QUERY_COMPILE%' THEN 'Compilation' + WHEN ws.[wait_type] LIKE 'CLR[_]%' or ws.[wait_type] like 'SQLCLR%' THEN 'SQL CLR' + WHEN ws.[wait_type] LIKE 'DBMIRROR_%' THEN 'Mirroring' + WHEN ws.[wait_type] LIKE 'DTC[_]%' or ws.[wait_type] LIKE 'DTCNEW%' or ws.[wait_type] LIKE 'TRAN_%' + or ws.[wait_type] LIKE 'XACT%' or ws.[wait_type] like 'MSQL_XACT%' THEN 'Transaction' + WHEN ws.[wait_type] LIKE 'SLEEP[_]%' + or ws.[wait_type] IN ( + 'LAZYWRITER_SLEEP', 'SQLTRACE_BUFFER_FLUSH', 'SQLTRACE_INCREMENTAL_FLUSH_SLEEP', + 'SQLTRACE_WAIT_ENTRIES', 'FT_IFTS_SCHEDULER_IDLE_WAIT', 'XE_DISPATCHER_WAIT', + 'REQUEST_FOR_DEADLOCK_SEARCH', 'LOGMGR_QUEUE', 'ONDEMAND_TASK_QUEUE', + 'CHECKPOINT_QUEUE', 'XE_TIMER_EVENT') THEN 'Idle' + WHEN ws.[wait_type] IN( + 'ASYNC_IO_COMPLETION','BACKUPIO','CHKPT','WRITE_COMPLETION', + 'IO_QUEUE_LIMIT', 'IO_RETRY') THEN 'Other Disk IO' + WHEN ws.[wait_type] LIKE 'PREEMPTIVE_%' THEN 'Preemptive' + WHEN ws.[wait_type] LIKE 'BROKER[_]%' THEN 'Service Broker' + WHEN ws.[wait_type] IN ( + 'WRITELOG','LOGBUFFER','LOGMGR_RESERVE_APPEND', + 'LOGMGR_FLUSH', 'LOGMGR_PMM_LOG') THEN 'Tran Log IO' + WHEN ws.[wait_type] LIKE 'LOG_RATE%' then 'Log Rate Governor' + WHEN ws.[wait_type] LIKE 'HADR_THROTTLE[_]%' + or ws.[wait_type] = 'THROTTLE_LOG_RATE_LOG_STORAGE' THEN 'HADR Log Rate Governor' + WHEN ws.[wait_type] LIKE 'RBIO_RG%' or ws.[wait_type] like 'WAIT_RBIO_RG%' then 'VLDB Log Rate Governor' + WHEN ws.[wait_type] LIKE 'RBIO[_]%' or ws.[wait_type] like 'WAIT_RBIO[_]%' then 'VLDB RBIO' + WHEN ws.[wait_type] IN( + 'ASYNC_NETWORK_IO','EXTERNAL_SCRIPT_NETWORK_IOF', + 'NET_WAITFOR_PACKET','PROXY_NETWORK_IO') 
THEN 'Network IO' + WHEN ws.[wait_type] IN ( 'CXPACKET', 'CXCONSUMER') + or ws.[wait_type] like 'HT%' or ws.[wait_type] like 'BMP%' + or ws.[wait_type] like 'BP%' THEN 'Parallelism' + WHEN ws.[wait_type] IN( + 'CMEMTHREAD','CMEMPARTITIONED','EE_PMOLOCK','EXCHANGE', + 'RESOURCE_SEMAPHORE','MEMORY_ALLOCATION_EXT', + 'RESERVED_MEMORY_ALLOCATION_EXT', 'MEMORY_GRANT_UPDATE') THEN 'Memory' + WHEN ws.[wait_type] IN ('WAITFOR','WAIT_FOR_RESULTS') THEN 'User Wait' + WHEN ws.[wait_type] LIKE 'HADR[_]%' or ws.[wait_type] LIKE 'PWAIT_HADR%' + or ws.[wait_type] LIKE 'REPLICA[_]%' or ws.[wait_type] LIKE 'REPL_%' + or ws.[wait_type] LIKE 'SE_REPL[_]%' + or ws.[wait_type] LIKE 'FCB_REPLICA%' THEN 'Replication' + WHEN ws.[wait_type] LIKE 'SQLTRACE[_]%' + or ws.[wait_type] IN ( + 'TRACEWRITE', 'SQLTRACE_LOCK', 'SQLTRACE_FILE_BUFFER', 'SQLTRACE_FILE_WRITE_IO_COMPLETION', + 'SQLTRACE_FILE_READ_IO_COMPLETION', 'SQLTRACE_PENDING_BUFFER_WRITERS', 'SQLTRACE_SHUTDOWN', + 'QUERY_TRACEOUT', 'TRACE_EVTNOTIF') THEN 'Tracing' + WHEN ws.[wait_type] IN ( + 'FT_RESTART_CRAWL', 'FULLTEXT GATHERER', 'MSSEARCH', 'FT_METADATA_MUTEX', + 'FT_IFTSHC_MUTEX', 'FT_IFTSISM_MUTEX', 'FT_IFTS_RWLOCK', 'FT_COMPROWSET_RWLOCK', + 'FT_MASTER_MERGE', 'FT_PROPERTYLIST_CACHE', 'FT_MASTER_MERGE_COORDINATOR', + 'PWAIT_RESOURCE_SEMAPHORE_FT_PARALLEL_QUERY_SYNC') THEN 'Full Text Search' + ELSE 'Other' + END as [wait_category] + FROM sys.dm_os_wait_stats AS ws WITH (NOLOCK) + WHERE + ws.[wait_type] NOT IN ( + N'BROKER_EVENTHANDLER', N'BROKER_RECEIVE_WAITFOR', N'BROKER_TASK_STOP', + N'BROKER_TO_FLUSH', N'BROKER_TRANSMITTER', N'CHECKPOINT_QUEUE', + N'CHKPT', N'CLR_AUTO_EVENT', N'CLR_MANUAL_EVENT', N'CLR_SEMAPHORE', + N'DBMIRROR_DBM_EVENT', N'DBMIRROR_EVENTS_QUEUE', N'DBMIRROR_QUEUE', + N'DBMIRRORING_CMD', N'DIRTY_PAGE_POLL', N'DISPATCHER_QUEUE_SEMAPHORE', + N'EXECSYNC', N'FSAGENT', N'FT_IFTS_SCHEDULER_IDLE_WAIT', N'FT_IFTSHC_MUTEX', + N'HADR_CLUSAPI_CALL', N'HADR_FILESTREAM_IOMGR_IOCOMPLETION', N'HADR_LOGCAPTURE_WAIT', + 
N'HADR_NOTIFICATION_DEQUEUE', N'HADR_TIMER_TASK', N'HADR_WORK_QUEUE', + N'KSOURCE_WAKEUP', N'LAZYWRITER_SLEEP', N'LOGMGR_QUEUE', + N'MEMORY_ALLOCATION_EXT', N'ONDEMAND_TASK_QUEUE', + N'PARALLEL_REDO_WORKER_WAIT_WORK', + N'PREEMPTIVE_HADR_LEASE_MECHANISM', N'PREEMPTIVE_SP_SERVER_DIAGNOSTICS', + N'PREEMPTIVE_OS_LIBRARYOPS', N'PREEMPTIVE_OS_COMOPS', N'PREEMPTIVE_OS_CRYPTOPS', + N'PREEMPTIVE_OS_PIPEOPS','PREEMPTIVE_OS_GENERICOPS', N'PREEMPTIVE_OS_VERIFYTRUST', + N'PREEMPTIVE_OS_DEVICEOPS', + N'PREEMPTIVE_XE_CALLBACKEXECUTE', N'PREEMPTIVE_XE_DISPATCHER', + N'PREEMPTIVE_XE_GETTARGETSTATE', N'PREEMPTIVE_XE_SESSIONCOMMIT', + N'PREEMPTIVE_XE_TARGETINIT', N'PREEMPTIVE_XE_TARGETFINALIZE', + N'PWAIT_ALL_COMPONENTS_INITIALIZED', N'PWAIT_DIRECTLOGCONSUMER_GETNEXT', + N'QDS_PERSIST_TASK_MAIN_LOOP_SLEEP', + N'QDS_ASYNC_QUEUE', + N'QDS_CLEANUP_STALE_QUERIES_TASK_MAIN_LOOP_SLEEP', N'REQUEST_FOR_DEADLOCK_SEARCH', + N'RESOURCE_QUEUE', N'SERVER_IDLE_CHECK', N'SLEEP_BPOOL_FLUSH', N'SLEEP_DBSTARTUP', + N'SLEEP_DCOMSTARTUP', N'SLEEP_MASTERDBREADY', N'SLEEP_MASTERMDREADY', + N'SLEEP_MASTERUPGRADED', N'SLEEP_MSDBSTARTUP', N'SLEEP_SYSTEMTASK', N'SLEEP_TASK', + N'SLEEP_TEMPDBSTARTUP', N'SNI_HTTP_ACCEPT', N'SP_SERVER_DIAGNOSTICS_SLEEP', + N'SQLTRACE_BUFFER_FLUSH', N'SQLTRACE_INCREMENTAL_FLUSH_SLEEP', + N'SQLTRACE_WAIT_ENTRIES', + N'WAIT_FOR_RESULTS', N'WAITFOR', N'WAITFOR_TASKSHUTDOWN', N'WAIT_XTP_HOST_WAIT', + N'WAIT_XTP_OFFLINE_CKPT_NEW_LOG', N'WAIT_XTP_CKPT_CLOSE', + N'XE_BUFFERMGR_ALLPROCESSED_EVENT', N'XE_DISPATCHER_JOIN', + N'XE_DISPATCHER_WAIT', N'XE_LIVE_TARGET_TVF', N'XE_TIMER_EVENT', + N'SOS_WORK_DISPATCHER','RESERVED_MEMORY_ALLOCATION_EXT','SQLTRACE_WAIT_ENTRIES', + N'RBIO_COMM_RETRY') + AND [waiting_tasks_count] > 10 + AND [wait_time_ms] > 100; diff --git a/examples/azure-sql-mi/sql_exporter.yml b/examples/azure-sql-mi/sql_exporter.yml new file mode 100644 index 00000000..eb9a5563 --- /dev/null +++ b/examples/azure-sql-mi/sql_exporter.yml @@ -0,0 +1,28 @@ +# Global defaults. 
+global: + # Subtracted from Prometheus' scrape_timeout to give us some headroom and prevent Prometheus from timing out first. + scrape_timeout_offset: 500ms + # Minimum interval between collector runs: by default (0s) collectors are executed on every scrape. + min_interval: 0s + # Maximum number of open connections to any one target. Metric queries will run concurrently on multiple connections, + # as will concurrent scrapes. + max_connections: 3 + # Maximum number of idle connections to any one target. Unless you use very long collection intervals, this should + # always be the same as max_connections. + max_idle_connections: 3 + # Maximum amount of time a connection may be reused. Expired connections may be closed lazily before reuse. + # If 0, connections are not closed due to a connection's age. + max_connection_lifetime: 5m + +# The target to monitor and the collectors to execute on it. +target: + # Data source name always has a URI schema that matches the driver name. In some cases (e.g. MySQL) + # the schema gets dropped or replaced to match the driver expected DSN format. + data_source_name: 'sqlserver://USERNAME_HERE:PASSWORD_HERE@SQLMI_HERE_ENDPOINT.database.windows.net:1433?encrypt=true&hostNameInCertificate=%2A.SQL_MI_DOMAIN_HERE.database.windows.net&trustservercertificate=true' + + # Collectors (referenced by name) to execute on the target. + collectors: [mssqlmi_*] + +# Collector files specifies a list of globs. One collector definition is read from each matching file. +collector_files: + - "*.collector.yml" diff --git a/examples/postgres-16.yml b/examples/postgres-16.yml new file mode 100644 index 00000000..54b985d3 --- /dev/null +++ b/examples/postgres-16.yml @@ -0,0 +1,787 @@ +# This example contains two collectors, one to collect server metrics, and a second to collect database level metrics. +# All metrics are prefixed according to which collector they originate from "pg_db_" or "pg_server_".
+# Many of these have been copied from https://github.com/prometheus-community/postgres_exporter +jobs: + - job_name: database + collectors: [database] + static_configs: + - targets: + foo: 'postgresql://postgres@/var/run/postgresql/foo?sslmode=disable' + bar: 'postgresql://postgres@/var/run/postgresql/bar?sslmode=disable' + - job_name: server + collectors: [server] + static_configs: + - targets: + server: 'postgresql://postgres@/var/run/postgresql?sslmode=disable' + +collectors: + - collector_name: database + metrics: + - metric_name: pg_db_stat_user_tables_seq_scan + type: counter + help: 'Number of sequential scans initiated on this table' + key_labels: + - relname + values: + - seq_scan + query_ref: _db_pg_stat_user_tables + + - metric_name: pg_db_stat_user_tables_seq_tup_read + type: counter + help: 'Number of live rows fetched by sequential scans' + key_labels: + - relname + values: + - seq_tup_read + query_ref: _db_pg_stat_user_tables + + - metric_name: pg_db_stat_user_tables_idx_scan + type: counter + help: 'Number of index scans initiated on this table' + key_labels: + - relname + values: + - idx_scan + query_ref: _db_pg_stat_user_tables + + - metric_name: pg_db_stat_user_tables_idx_tup_fetch + type: counter + help: 'Number of live rows fetched by index scans' + key_labels: + - relname + values: + - idx_tup_fetch + query_ref: _db_pg_stat_user_tables + + - metric_name: pg_db_stat_user_tables_n_tup_ins + type: counter + help: 'Total number of rows inserted' + key_labels: + - relname + values: + - n_tup_ins + query_ref: _db_pg_stat_user_tables + + - metric_name: pg_db_stat_user_tables_n_tup_upd + type: counter + help: 'Total number of rows updated. 
(This includes row updates counted in n_tup_hot_upd and n_tup_newpage_upd, and remaining non-HOT updates.)' + key_labels: + - relname + values: + - n_tup_upd + query_ref: _db_pg_stat_user_tables + + - metric_name: pg_db_stat_user_tables_n_tup_del + type: counter + help: 'Total number of rows deleted' + key_labels: + - relname + values: + - n_tup_del + query_ref: _db_pg_stat_user_tables + + - metric_name: pg_db_stat_user_tables_n_tup_hot_upd + type: counter + help: 'Number of rows HOT updated. These are updates where no successor versions are required in indexes.' + key_labels: + - relname + values: + - n_tup_hot_upd + query_ref: _db_pg_stat_user_tables + + - metric_name: pg_db_stat_user_tables_n_live_tup + type: gauge + help: 'Estimated number of live rows' + key_labels: + - relname + values: + - n_live_tup + query_ref: _db_pg_stat_user_tables + + - metric_name: pg_db_stat_user_tables_n_dead_tup + type: gauge + help: 'Estimated number of dead rows' + key_labels: + - relname + values: + - n_dead_tup + query_ref: _db_pg_stat_user_tables + + - metric_name: pg_db_stat_user_tables_n_mod_since_analyze + type: counter + help: 'Estimated number of rows modified since this table was last analyzed' + key_labels: + - relname + values: + - n_mod_since_analyze + query_ref: _db_pg_stat_user_tables + + - metric_name: pg_db_stat_user_tables_n_ins_since_vacuum + type: counter + help: 'Estimated number of rows inserted since this table was last vacuumed' + key_labels: + - relname + values: + - n_ins_since_vacuum + query_ref: _db_pg_stat_user_tables + + - metric_name: pg_db_stat_user_tables_last_vacuum + type: gauge + help: 'Last time at which this table was manually vacuumed (not counting VACUUM FULL)' + key_labels: + - relname + values: + - last_vacuum + query_ref: _db_pg_stat_user_tables + + - metric_name: pg_db_stat_user_tables_last_autovacuum + type: gauge + help: 'Last time at which this table was vacuumed by the autovacuum daemon' + key_labels: + - relname + values: + - 
last_autovacuum + query_ref: _db_pg_stat_user_tables + + - metric_name: pg_db_stat_user_tables_last_analyze + type: gauge + help: 'Last time at which this table was manually analyzed' + key_labels: + - relname + values: + - last_analyze + query_ref: _db_pg_stat_user_tables + + - metric_name: pg_db_stat_user_tables_last_autoanalyze + type: gauge + help: 'Last time at which this table was analyzed by the autovacuum daemon' + key_labels: + - relname + values: + - last_autoanalyze + query_ref: _db_pg_stat_user_tables + + - metric_name: pg_db_stat_user_tables_vacuum_count + type: counter + help: 'Number of times this table has been manually vacuumed (not counting VACUUM FULL)' + key_labels: + - relname + values: + - vacuum_count + query_ref: _db_pg_stat_user_tables + + - metric_name: pg_db_stat_user_tables_autovacuum_count + type: counter + help: 'Number of times this table has been vacuumed by the autovacuum daemon' + key_labels: + - relname + values: + - autovacuum_count + query_ref: _db_pg_stat_user_tables + + - metric_name: pg_db_stat_user_tables_analyze_count + type: counter + help: 'Number of times this table has been manually analyzed' + key_labels: + - relname + values: + - analyze_count + query_ref: _db_pg_stat_user_tables + + - metric_name: pg_db_stat_user_tables_autoanalyze_count + type: counter + help: 'Number of times this table has been analyzed by the autovacuum daemon' + key_labels: + - relname + values: + - autoanalyze_count + query_ref: _db_pg_stat_user_tables + + - metric_name: pg_db_stat_user_tables_size_bytes + type: gauge + help: 'Total disk space used by the table, including all indexes and TOAST data. The result is equivalent to pg_table_size + pg_indexes_size.' 
+ key_labels: + - relname + values: + - total_size + query_ref: _db_pg_stat_user_tables + + - metric_name: pg_db_stat_activity_transactions + type: gauge + help: 'Number of transactions currently in progress' + values: + - transactions + query_ref: _db_pg_stat_activity + + - metric_name: pg_db_stat_activity_oldest_timestamp_seconds + type: gauge + help: 'Age of the oldest transaction in seconds' + values: + - oldest_timestamp_seconds + query_ref: _db_pg_stat_activity + + - metric_name: pg_db_statio_user_indexes_idx_blks_read + type: counter + help: 'Number of disk blocks read from this index' + key_labels: + - relname + - indexrelname + values: + - idx_blks_read + query_ref: _db_pg_statio_user_indexes + + - metric_name: pg_db_statio_user_indexes_idx_blks_hit + type: counter + help: 'Number of buffer hits in this index' + key_labels: + - relname + - indexrelname + values: + - idx_blks_hit + query_ref: _db_pg_statio_user_indexes + + - metric_name: pg_db_statio_user_tables_heap_blks_read + type: counter + help: 'Number of disk blocks read from this table' + key_labels: + - relname + values: + - heap_blks_read + query_ref: _db_pg_statio_user_tables + + - metric_name: pg_db_statio_user_tables_heap_blks_hit + type: counter + help: 'Number of buffer hits in this table' + key_labels: + - relname + values: + - heap_blks_hit + query_ref: _db_pg_statio_user_tables + + - metric_name: pg_db_statio_user_tables_idx_blks_read + type: counter + help: 'Number of disk blocks read from all indexes on this table' + key_labels: + - relname + values: + - idx_blks_read + query_ref: _db_pg_statio_user_tables + + - metric_name: pg_db_statio_user_tables_idx_blks_hit + type: counter + help: 'Number of buffer hits in all indexes on this table' + key_labels: + - relname + values: + - idx_blks_hit + query_ref: _db_pg_statio_user_tables + + - metric_name: pg_db_statio_user_tables_toast_blks_read + type: counter + help: 'Number of disk blocks read from this table''s TOAST table (if any)' + 
key_labels: + - relname + values: + - toast_blks_read + query_ref: _db_pg_statio_user_tables + + - metric_name: pg_db_statio_user_tables_toast_blks_hit + type: counter + help: 'Number of buffer hits in this table''s TOAST table (if any)' + key_labels: + - relname + values: + - toast_blks_hit + query_ref: _db_pg_statio_user_tables + + - metric_name: pg_db_statio_user_tables_tidx_blks_read + type: counter + help: 'Number of disk blocks read from this table''s TOAST table indexes (if any)' + key_labels: + - relname + values: + - tidx_blks_read + query_ref: _db_pg_statio_user_tables + + - metric_name: pg_db_statio_user_tables_tidx_blks_hit + type: counter + help: 'Number of buffer hits in this table''s TOAST table indexes (if any)' + key_labels: + - relname + values: + - tidx_blks_hit + query_ref: _db_pg_statio_user_tables + + - metric_name: pg_db_size_bytes + help: 'Disk space used by the database' + type: gauge + values: + - size + query_ref: _db_pg_database_size + + queries: + - query_name: _db_pg_database_size + query: | + select pg_database_size(current_database()) as size + - query_name: _db_pg_stat_user_tables + query: | + SELECT + relname, + + COALESCE(seq_scan,0) as seq_scan, + COALESCE(seq_tup_read,0) as seq_tup_read, + + COALESCE(idx_scan,0) as idx_scan, + COALESCE(idx_tup_fetch,0) as idx_tup_fetch, + + COALESCE(n_tup_ins,0) as n_tup_ins, + COALESCE(n_tup_upd,0) as n_tup_upd, + COALESCE(n_tup_del,0) as n_tup_del, + COALESCE(n_tup_hot_upd,0) as n_tup_hot_upd, + + COALESCE(n_live_tup,0) as n_live_tup, + COALESCE(n_dead_tup,0) as n_dead_tup, + + COALESCE(n_mod_since_analyze,0) as n_mod_since_analyze, + COALESCE(n_ins_since_vacuum,0) as n_ins_since_vacuum, + + COALESCE(EXTRACT(EPOCH FROM last_vacuum), 0) as last_vacuum, + COALESCE(EXTRACT(EPOCH FROM last_autovacuum), 0) as last_autovacuum, + COALESCE(EXTRACT(EPOCH FROM last_analyze), 0) as last_analyze, + COALESCE(EXTRACT(EPOCH FROM last_autoanalyze), 0) as last_autoanalyze, + + COALESCE(vacuum_count,0) as 
vacuum_count, + COALESCE(autovacuum_count,0) as autovacuum_count, + COALESCE(analyze_count,0) as analyze_count, + COALESCE(autoanalyze_count,0) as autoanalyze_count, + + pg_total_relation_size(relid) as total_size + FROM + pg_stat_user_tables + + - query_name: _db_pg_stat_activity + query: | + SELECT + COUNT(*) as transactions, + MAX(EXTRACT(EPOCH FROM clock_timestamp() - pg_stat_activity.xact_start)) AS oldest_timestamp_seconds + FROM pg_stat_activity + WHERE state IS DISTINCT FROM 'idle' + AND query NOT LIKE 'autovacuum:%' + AND pg_stat_activity.xact_start IS NOT NULL; + + - query_name: _db_pg_statio_user_indexes + query: | + SELECT + relname, + indexrelname, + idx_blks_read, + idx_blks_hit + FROM pg_statio_user_indexes + + - query_name: _db_pg_statio_user_tables + query: | + SELECT + relname, + -- Number of disk blocks read from this table + coalesce(heap_blks_read,0) as heap_blks_read, + -- Number of buffer hits in this table + coalesce(heap_blks_hit,0) as heap_blks_hit, + -- Number of disk blocks read from all indexes on this table + coalesce(idx_blks_read,0) as idx_blks_read, + -- Number of buffer hits in all indexes on this table + coalesce(idx_blks_hit,0) as idx_blks_hit, + -- Number of disk blocks read from this table's TOAST table (if any) + coalesce(toast_blks_read, 0) as toast_blks_read, + -- Number of buffer hits in this table's TOAST table (if any) + coalesce(toast_blks_hit, 0) as toast_blks_hit, + -- Number of disk blocks read from this table's TOAST table indexes (if any) + coalesce(tidx_blks_read, 0) as tidx_blks_read, + -- Number of buffer hits in this table's TOAST table indexes (if any) + coalesce(tidx_blks_hit, 0) as tidx_blks_hit + FROM pg_statio_user_tables + + - collector_name: server + metrics: + + - metric_name: pg_server_wal_segments + type: gauge + help: 'Number of segments in the WAL directory' + values: + - segments + query_ref: _srv_pg_ls_waldir + + - metric_name: pg_server_wal_size_bytes + type: gauge + help: 'Size of the WAL 
directory' + values: + - size + query_ref: _srv_pg_ls_waldir + + - metric_name: pg_server_stat_bgwriter_checkpoints_timed + type: counter + help: 'Number of scheduled checkpoints that have been performed' + values: + - checkpoints_timed + query_ref: _srv_bgwriter + + - metric_name: pg_server_stat_bgwriter_checkpoints_req + type: counter + help: 'Number of requested checkpoints that have been performed' + values: + - checkpoints_req + query_ref: _srv_bgwriter + + - metric_name: pg_server_stat_bgwriter_checkpoint_write_time + type: counter + help: 'Total amount of time that has been spent in the portion of checkpoint processing where files are written to disk, in milliseconds' + values: + - checkpoint_write_time + query_ref: _srv_bgwriter + + - metric_name: pg_server_stat_bgwriter_checkpoint_sync_time + type: counter + help: 'Total amount of time that has been spent in the portion of checkpoint processing where files are synchronized to disk, in milliseconds' + values: + - checkpoint_sync_time + query_ref: _srv_bgwriter + + - metric_name: pg_server_stat_bgwriter_buffers_checkpoint + type: counter + help: 'Number of buffers written during checkpoints' + values: + - buffers_checkpoint + query_ref: _srv_bgwriter + + - metric_name: pg_server_stat_bgwriter_buffers_clean + type: counter + help: 'Number of buffers written by the background writer' + values: + - buffers_clean + query_ref: _srv_bgwriter + + - metric_name: pg_server_stat_bgwriter_maxwritten_clean + type: counter + help: 'Number of times the background writer stopped a cleaning scan because it had written too many buffers' + values: + - maxwritten_clean + query_ref: _srv_bgwriter + + - metric_name: pg_server_stat_bgwriter_buffers_backend + type: counter + help: 'Number of buffers written directly by a backend' + values: + - buffers_backend + query_ref: _srv_bgwriter + + - metric_name: pg_server_stat_bgwriter_buffers_backend_fsync + type: counter + help: 'Number of times a backend had to execute its own fsync 
call (normally the background writer handles those even when the backend does its own write)' + values: + - buffers_backend_fsync + query_ref: _srv_bgwriter + + - metric_name: pg_server_stat_bgwriter_buffers_alloc + type: counter + help: 'Number of buffers allocated' + values: + - buffers_alloc + query_ref: _srv_bgwriter + + - metric_name: pg_server_stat_database_numbackends + type: gauge + help: 'Number of backends currently connected to this database' + key_labels: + - datname + values: + - numbackends + query_ref: _srv_pg_stat_database + + - metric_name: pg_server_stat_database_xact_commit + type: counter + help: 'Number of transactions in this database that have been committed' + key_labels: + - datname + values: + - xact_commit + query_ref: _srv_pg_stat_database + + - metric_name: pg_server_stat_database_xact_rollback + type: counter + help: 'Number of transactions in this database that have been rolled back' + key_labels: + - datname + values: + - xact_rollback + query_ref: _srv_pg_stat_database + + - metric_name: pg_server_stat_database_blks_read + type: counter + help: 'Number of disk blocks read in this database' + key_labels: + - datname + values: + - blks_read + query_ref: _srv_pg_stat_database + + - metric_name: pg_server_stat_database_blks_hit + type: counter + help: 'Number of times disk blocks were found already in the buffer cache' + key_labels: + - datname + values: + - blks_hit + query_ref: _srv_pg_stat_database + + - metric_name: pg_server_stat_database_tup_returned + type: counter + help: 'Number of live rows fetched by sequential scans and index entries returned by index scans in this database' + key_labels: + - datname + values: + - tup_returned + query_ref: _srv_pg_stat_database + + - metric_name: pg_server_stat_database_tup_fetched + type: counter + help: 'Number of live rows fetched by index scans in this database' + key_labels: + - datname + values: + - tup_fetched + query_ref: _srv_pg_stat_database + + - metric_name: 
pg_server_stat_database_tup_inserted + type: counter + help: 'Number of rows inserted by queries in this database' + key_labels: + - datname + values: + - tup_inserted + query_ref: _srv_pg_stat_database + + - metric_name: pg_server_stat_database_tup_updated + type: counter + help: 'Number of rows updated by queries in this database' + key_labels: + - datname + values: + - tup_updated + query_ref: _srv_pg_stat_database + + - metric_name: pg_server_stat_database_tup_deleted + type: counter + help: 'Number of rows deleted by queries in this database' + key_labels: + - datname + values: + - tup_deleted + query_ref: _srv_pg_stat_database + + - metric_name: pg_server_stat_database_temp_files + type: counter + help: 'Number of temporary files created by queries in this database' + key_labels: + - datname + values: + - temp_files + query_ref: _srv_pg_stat_database + + - metric_name: pg_server_stat_database_temp_bytes + type: counter + help: 'Total amount of data written to temporary files by queries in this database' + key_labels: + - datname + values: + - temp_bytes + query_ref: _srv_pg_stat_database + + - metric_name: pg_server_stat_database_deadlocks + type: counter + help: 'Number of deadlocks detected in this database' + key_labels: + - datname + values: + - deadlocks + query_ref: _srv_pg_stat_database + + - metric_name: pg_server_stat_database_blk_read_time + type: counter + help: 'Time spent reading data file blocks by backends in this database, in milliseconds' + key_labels: + - datname + values: + - blk_read_time + query_ref: _srv_pg_stat_database + + - metric_name: pg_server_stat_database_blk_write_time + type: counter + help: 'Time spent writing data file blocks by backends in this database, in milliseconds' + key_labels: + - datname + values: + - blk_write_time + query_ref: _srv_pg_stat_database + + - metric_name: pg_server_stat_database_session_time + type: counter + help: 'Time spent by database sessions in this database, in milliseconds' + key_labels: + - 
datname + values: + - session_time + query_ref: _srv_pg_stat_database + + - metric_name: pg_server_stat_database_active_time + type: counter + help: 'Time spent executing SQL statements in this database, in milliseconds' + key_labels: + - datname + values: + - active_time + query_ref: _srv_pg_stat_database + + - metric_name: pg_server_stat_database_idle_in_transaction_time + type: counter + help: 'Time spent idling while in a transaction in this database, in milliseconds' + key_labels: + - datname + values: + - idle_in_transaction_time + query_ref: _srv_pg_stat_database + + - metric_name: pg_server_stat_database_sessions + type: gauge + help: 'Total number of sessions established to this database' + key_labels: + - datname + values: + - sessions + query_ref: _srv_pg_stat_database + + - metric_name: pg_server_stat_database_sessions_abandoned + type: counter + help: 'Number of database sessions to this database that were terminated because connection to the client was lost' + key_labels: + - datname + values: + - sessions_abandoned + query_ref: _srv_pg_stat_database + + - metric_name: pg_server_stat_database_sessions_fatal + type: counter + help: 'Number of database sessions to this database that were terminated by fatal errors' + key_labels: + - datname + values: + - sessions_fatal + query_ref: _srv_pg_stat_database + + - metric_name: pg_server_stat_database_sessions_killed + type: counter + help: 'Number of database sessions to this database that were terminated by operator intervention' + key_labels: + - datname + values: + - sessions_killed + query_ref: _srv_pg_stat_database + + - metric_name: pg_server_locks + type: gauge + help: 'Number of locks held in this database' + key_labels: + - datname + - mode + values: + - count + query_ref: _srv_pg_locks + + queries: + - query_name: _srv_pg_locks + query: | + SELECT + pg_database.datname as datname, + -- Name of the lock mode + tmp.mode as mode, + COALESCE(count, 0) as count + FROM + ( + VALUES + ('accesssharelock'), 
+ ('rowsharelock'), + ('rowexclusivelock'), + ('shareupdateexclusivelock'), + ('sharelock'), + ('sharerowexclusivelock'), + ('exclusivelock'), + ('accessexclusivelock'), + ('sireadlock') + ) AS tmp(mode) + CROSS JOIN pg_database + LEFT JOIN ( + SELECT + database, + lower(mode) AS mode, + count(*) AS count + FROM + pg_locks + WHERE + database IS NOT NULL + GROUP BY + database, + lower(mode) + ) AS tmp2 ON tmp.mode = tmp2.mode + and pg_database.oid = tmp2.database + + - query_name: _srv_bgwriter + query: | + SELECT + /* The following 5 are moved to pg_stat_checkpointer in postgres 17 */ + + checkpoints_timed + ,checkpoints_req + ,checkpoint_write_time + ,checkpoint_sync_time + ,buffers_checkpoint + + ,buffers_clean + ,maxwritten_clean + ,buffers_backend + ,buffers_backend_fsync + ,buffers_alloc + FROM pg_stat_bgwriter; + + - query_name: _srv_pg_stat_database + query: | + SELECT + COALESCE(datname, 'shared-objects') as datname, + COALESCE(numbackends, 0) as numbackends, + xact_commit, + xact_rollback, + blks_read, + blks_hit, + tup_returned, + tup_fetched, + tup_inserted, + tup_updated, + tup_deleted, + temp_files, + temp_bytes, + deadlocks, + blk_read_time, + blk_write_time, + session_time, + active_time, + idle_in_transaction_time, + sessions, + sessions_abandoned, + sessions_fatal, + sessions_killed + FROM pg_stat_database + + - query_name: _srv_pg_ls_waldir + query: | + SELECT + COUNT(*) AS segments, + SUM(size) AS size + FROM pg_ls_waldir() + diff --git a/examples/sql_exporter.yml b/examples/sql_exporter.yml index 1595a4bc..5c2ba4bc 100644 --- a/examples/sql_exporter.yml +++ b/examples/sql_exporter.yml @@ -1,5 +1,7 @@ # Global defaults. global: + # If scrape_timeout <= 0, no timeout is set unless Prometheus provides one. The default is 10s. + scrape_timeout: 10s # Subtracted from Prometheus' scrape_timeout to give us some headroom and prevent Prometheus from timing out first. 
scrape_timeout_offset: 500ms # Minimum interval between collector runs: by default (0s) collectors are executed on every scrape. @@ -18,11 +20,13 @@ global: target: # Data source name always has a URI schema that matches the driver name. In some cases (e.g. MySQL) # the schema gets dropped or replaced to match the driver expected DSN format. - data_source_name: 'sqlserver://prom_user:prom_password@dbserver1.example.com:1433' + data_source_name: 'sqlserver://prom_user:prom_password@dbserver1.example.com:1433/dbname' # Collectors (referenced by name) to execute on the target. - collectors: [mssql_standard] + # Glob patterns are supported (see https://pkg.go.dev/path/filepath#Match for syntax). + collectors: [mssql_*] # Collector files specifies a list of globs. One collector definition is read from each matching file. +# Glob patterns are supported (see https://pkg.go.dev/path/filepath#Match for syntax). collector_files: - "*.collector.yml" diff --git a/exporter.go b/exporter.go index 5cfa85aa..a9ab1408 100644 --- a/exporter.go +++ b/exporter.go @@ -3,20 +3,23 @@ package sql_exporter import ( "context" "errors" - "flag" "fmt" - "os" + "log/slog" + "strings" "sync" "github.com/burningalchemist/sql_exporter/config" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" + "google.golang.org/protobuf/proto" ) -const envDsnOverride = "SQLEXPORTER_TARGET_DSN" - -var dsnOverride = flag.String("config.data-source-name", "", "Data source name to override the value in the configuration file with.") +var ( + SvcRegistry = prometheus.NewRegistry() + svcMetricLabels = []string{"job", "target", "collector", "query"} + scrapeErrorsMetric *prometheus.CounterVec +) // Exporter is a prometheus.Gatherer that gathers SQL metrics from targets and merges them with the default registry.
Config() *config.Config + // UpdateTarget updates the targets field UpdateTarget([]Target) + // SetJobFilters sets the jobFilters field + SetJobFilters([]string) + // DropErrorMetrics resets the scrape_errors_total metric + DropErrorMetrics() } type exporter struct { - config *config.Config - targets []Target + config *config.Config + targets []Target + jobFilters []string ctx context.Context } @@ -43,20 +52,17 @@ func NewExporter(configFile string) (Exporter, error) { return nil, err } - if val, ok := os.LookupEnv(envDsnOverride); ok { - *dsnOverride = val - } // Override the DSN if requested (and in single target mode). - if *dsnOverride != "" { + if config.DsnOverride != "" { if len(c.Jobs) > 0 { - return nil, fmt.Errorf("the config.data-source-name flag (value %q) only applies in single target mode", *dsnOverride) + return nil, errors.New("the config.data-source-name flag only applies in single target mode") } - c.Target.DSN = config.Secret(*dsnOverride) + c.Target.DSN = config.Secret(config.DsnOverride) } var targets []Target if c.Target != nil { - target, err := NewTarget("", "", string(c.Target.DSN), c.Target.Collectors(), nil, c.Globals) + target, err := NewTarget("", c.Target.Name, "", string(c.Target.DSN), c.Target.Collectors(), nil, c.Globals, c.Target.EnablePing) if err != nil { return nil, err } @@ -75,18 +81,22 @@ func NewExporter(configFile string) (Exporter, error) { } } + scrapeErrorsMetric = registerScrapeErrorMetric() + return &exporter{ - config: c, - targets: targets, - ctx: context.Background(), + config: c, + targets: targets, + jobFilters: []string{}, + ctx: context.Background(), }, nil } func (e *exporter) WithContext(ctx context.Context) Exporter { return &exporter{ - config: e.config, - targets: e.targets, - ctx: ctx, + config: e.config, + targets: e.targets, + jobFilters: e.jobFilters, + ctx: ctx, } } @@ -97,6 +107,13 @@ func (e *exporter) Gather() ([]*dto.MetricFamily, error) { errs prometheus.MultiError ) + // Filter out jobs that are 
not in the jobFilters list + e.filterTargets(e.jobFilters) + + if len(e.targets) == 0 { + return nil, errors.New("no targets found") + } + var wg sync.WaitGroup wg.Add(len(e.targets)) for _, t := range e.targets { @@ -124,6 +141,14 @@ func (e *exporter) Gather() ([]*dto.MetricFamily, error) { dtoMetric := &dto.Metric{} if err := metric.Write(dtoMetric); err != nil { errs = append(errs, err) + if err.Context() != "" { + ctxLabels := parseContextLog(err.Context()) + values := make([]string, len(svcMetricLabels)) + for i, label := range svcMetricLabels { + values[i] = ctxLabels[label] + } + scrapeErrorsMetric.WithLabelValues(values...).Inc() + } continue } metricDesc := metric.Desc() @@ -154,11 +179,70 @@ func (e *exporter) Gather() ([]*dto.MetricFamily, error) { return result, errs } +func (e *exporter) filterTargets(jf []string) { + if len(jf) > 0 { + var filteredTargets []Target + for _, target := range e.targets { + for _, jobFilter := range jf { + if jobFilter == target.JobGroup() { + filteredTargets = append(filteredTargets, target) + break + } + } + } + if len(filteredTargets) == 0 { + slog.Error("No targets found for job filters. Nothing to scrape.") + } + e.targets = filteredTargets + } +} + // Config implements Exporter. func (e *exporter) Config() *config.Config { return e.config } +// UpdateTarget implements Exporter. func (e *exporter) UpdateTarget(target []Target) { e.targets = target } + +// SetJobFilters implements Exporter. +func (e *exporter) SetJobFilters(filters []string) { + e.jobFilters = filters +} + +// DropErrorMetrics implements Exporter. +func (e *exporter) DropErrorMetrics() { + scrapeErrorsMetric.Reset() + slog.Debug("Dropped scrape_errors_total metric") +} + +// registerScrapeErrorMetric registers the metrics for the exporter itself. 
+func registerScrapeErrorMetric() *prometheus.CounterVec { + scrapeErrors := prometheus.NewCounterVec(prometheus.CounterOpts{ + Name: "scrape_errors_total", + Help: "Total number of scrape errors per job, target, collector and query", + }, svcMetricLabels) + SvcRegistry.MustRegister(scrapeErrors) + return scrapeErrors +} + +// split comma separated list of key=value pairs and return a map of key value pairs +func parseContextLog(list string) map[string]string { + m := make(map[string]string) + for _, item := range strings.Split(list, ",") { + parts := strings.SplitN(item, "=", 2) + m[parts[0]] = parts[1] + } + return m +} + +// Leading comma appears when previous parameter is undefined, which is a side-effect of running in single target mode. +// Let's trim to avoid confusions. +func TrimMissingCtx(logContext string) string { + if strings.HasPrefix(logContext, ",") { + logContext = strings.TrimLeft(logContext, ", ") + } + return logContext +} diff --git a/go.mod b/go.mod index 82d158db..03e3205a 100644 --- a/go.mod +++ b/go.mod @@ -1,65 +1,84 @@ module github.com/burningalchemist/sql_exporter -go 1.18 +go 1.23.0 require ( + github.com/aws/aws-sdk-go-v2 v1.36.5 + github.com/aws/aws-sdk-go-v2/config v1.29.17 + github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.35.7 github.com/databricks/databricks-sql-go v1.0.1-0.20230105210901-69b282787450 - github.com/kardianos/minwinsvc v1.0.0 - github.com/prometheus/client_golang v1.13.0 - github.com/prometheus/client_model v0.2.0 - github.com/prometheus/common v0.37.0 - github.com/prometheus/exporter-toolkit v0.7.1 - github.com/trinodb/trino-go-client v0.309.0 - github.com/xo/dburl v0.12.4 - google.golang.org/protobuf v1.28.1 + github.com/kardianos/minwinsvc v1.0.2 + github.com/prometheus/client_golang v1.22.0 + github.com/prometheus/client_model v0.6.2 + github.com/prometheus/common v0.65.0 + github.com/prometheus/exporter-toolkit v0.14.0 + github.com/sethvargo/go-envconfig v1.3.0 + github.com/trinodb/trino-go-client 
v0.326.0 + github.com/xo/dburl v0.23.8 + google.golang.org/protobuf v1.36.6 gopkg.in/yaml.v3 v3.0.1 - k8s.io/klog/v2 v2.70.1 ) require ( - github.com/apache/thrift v0.17.0 // indirect - github.com/beorn7/perks v1.0.1 // indirect - github.com/cespare/xxhash/v2 v2.1.2 // indirect - github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/apache/thrift v0.21.0 // indirect github.com/dnephin/pflag v1.0.7 // indirect - github.com/fatih/color v1.13.0 // indirect + github.com/docker/docker v28.2.2+incompatible // indirect + github.com/fatih/color v1.15.0 // indirect github.com/fsnotify/fsnotify v1.5.4 // indirect - github.com/go-kit/log v0.2.0 // indirect - github.com/go-logfmt/logfmt v0.5.1 // indirect - github.com/golang/protobuf v1.5.2 // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect github.com/hashicorp/go-cleanhttp v0.5.1 // indirect github.com/hashicorp/go-retryablehttp v0.7.1 // indirect github.com/hashicorp/go-uuid v1.0.3 // indirect - github.com/jcmturner/gofork v1.0.0 // indirect + github.com/jcmturner/gofork v1.7.6 // indirect + github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-isatty v0.0.19 // indirect + github.com/opencontainers/image-spec v1.1.1 // indirect + github.com/rogpeppe/go-internal v1.12.0 // indirect + github.com/rs/zerolog v1.28.0 // indirect + gotest.tools/gotestsum v1.8.2 // indirect +) + +require ( + github.com/aws/aws-sdk-go-v2/credentials v1.17.70 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.32 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.36 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.36 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.4 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.17 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.25.5 // 
indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.3 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.34.0 // indirect + github.com/aws/smithy-go v1.22.4 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/coreos/go-systemd/v22 v22.5.0 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/jcmturner/aescts/v2 v2.0.0 // indirect + github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect + github.com/jcmturner/goidentity/v6 v6.0.1 // indirect + github.com/jcmturner/gokrb5/v8 v8.4.4 // indirect + github.com/jcmturner/rpc/v2 v2.0.3 // indirect github.com/jpillora/backoff v1.0.0 // indirect - github.com/mattn/go-colorable v0.1.12 // indirect - github.com/mattn/go-isatty v0.0.16 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect + github.com/klauspost/compress v1.18.0 // indirect + github.com/mdlayher/socket v0.4.1 // indirect + github.com/mdlayher/vsock v1.2.1 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect + github.com/pierrec/lz4 v2.6.1+incompatible // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/prometheus/procfs v0.8.0 // indirect - github.com/rs/zerolog v1.28.0 // indirect - golang.org/x/crypto v0.0.0-20220919173607-35f4265a4bc0 // indirect - golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect - golang.org/x/net v0.0.0-20220920203100-d0c6ba3f52d9 // indirect - golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b // indirect - golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f // indirect - golang.org/x/sys v0.0.0-20220919091848-fb04ddd9f9c8 // indirect - golang.org/x/term v0.0.0-20220526004731-065cf7ba2467 // indirect - golang.org/x/text 
v0.3.7 // indirect - golang.org/x/tools v0.1.11 // indirect - google.golang.org/appengine v1.6.6 // indirect - gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect - gopkg.in/jcmturner/aescts.v1 v1.0.1 // indirect - gopkg.in/jcmturner/dnsutils.v1 v1.0.1 // indirect - gopkg.in/jcmturner/gokrb5.v6 v6.1.1 // indirect - gopkg.in/jcmturner/rpc.v1 v1.1.0 // indirect + github.com/prometheus/procfs v0.15.1 // indirect + golang.org/x/crypto v0.39.0 // indirect + golang.org/x/mod v0.25.0 // indirect + golang.org/x/net v0.41.0 // indirect + golang.org/x/oauth2 v0.30.0 // indirect + golang.org/x/sync v0.15.0 // indirect + golang.org/x/sys v0.33.0 // indirect + golang.org/x/term v0.32.0 // indirect + golang.org/x/text v0.26.0 // indirect + golang.org/x/tools v0.33.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect - gotest.tools/gotestsum v1.8.2 // indirect ) -replace k8s.io/klog/v2 => github.com/simonpasquier/klog-gokit/v3 v3.1.0 - -replace github.com/xo/dburl v0.12.4 => github.com/a-monteiro/dburl v0.13.1 +replace github.com/xo/dburl v0.23.8 => github.com/a-monteiro/dburl v0.23.8-fork diff --git a/go.sum b/go.sum index f23593ee..ff4d2642 100644 --- a/go.sum +++ b/go.sum @@ -1,81 +1,68 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= -cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= -cloud.google.com/go v0.53.0/go.mod 
h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= -cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= -cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= -cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= -cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= -cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= -cloud.google.com/go/storage v1.10.0/go.mod 
h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= -github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= -github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/Microsoft/go-winio v0.5.2 h1:a9IhgEQBCUEk6QCdml9CiJGhAws+YwffDHEMp1VMrpA= -github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= +dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= +dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= +github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= +github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= +github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw= github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk= -github.com/a-monteiro/dburl v0.13.1 h1:/5H9lNTfTEP5qBPcaydseuzXZLXRy3wRrEOEMo0A+gY= -github.com/a-monteiro/dburl v0.13.1/go.mod h1:FssZaYwK7Ft/Wg6VqMJV6I0umR8bTv3Rktbi9bZ4PGo= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= 
-github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= -github.com/apache/thrift v0.17.0 h1:cMd2aj52n+8VoAtvSvLn4kDC3aZ6IAkBuqWQ2IDu7wo= -github.com/apache/thrift v0.17.0/go.mod h1:OLxhMRJxomX+1I/KUw03qoV3mMz16BwaKI+d4fPBx7Q= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/a-monteiro/dburl v0.23.8-fork h1:O2+6KeAFE1znxOWLgbEap9Cdx6nAbEFIn4tKOfw+GNs= +github.com/a-monteiro/dburl v0.23.8-fork/go.mod h1:tgY/JvFUyXzD4b5S2Tx3e7A+C5Kf6IB+3GE/kcKwMAU= +github.com/ahmetb/dlog v0.0.0-20170105205344-4fb5f8204f26 h1:3YVZUqkoev4mL+aCwVOSWV4M7pN+NURHL38Z2zq5JKA= +github.com/ahmetb/dlog v0.0.0-20170105205344-4fb5f8204f26/go.mod h1:ymXt5bw5uSNu4jveerFxE0vNYxF8ncqbptntMaFMg3k= +github.com/apache/thrift v0.21.0 h1:tdPmh/ptjE1IJnhbhrcl2++TauVjy242rkV/UzJChnE= +github.com/apache/thrift v0.21.0/go.mod h1:W1H8aR/QRtYNvrPeFXBtobyRkd0/YVhTc6i07XIAgDw= +github.com/aws/aws-sdk-go v1.55.6 h1:cSg4pvZ3m8dgYcgqB97MrcdjUmZ1BeMYKUxMMB89IPk= +github.com/aws/aws-sdk-go v1.55.6/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= +github.com/aws/aws-sdk-go-v2 v1.36.5 h1:0OF9RiEMEdDdZEMqF9MRjevyxAQcf6gY+E7vwBILFj0= +github.com/aws/aws-sdk-go-v2 v1.36.5/go.mod h1:EYrzvCCN9CMUTa5+6lf6MM4tq3Zjp8UhSGR/cBsjai0= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10 h1:zAybnyUQXIZ5mok5Jqwlf58/TFE7uvd3IAsa1aF9cXs= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10/go.mod 
h1:qqvMj6gHLR/EXWZw4ZbqlPbQUyenf4h82UQUlKc+l14= +github.com/aws/aws-sdk-go-v2/config v1.29.17 h1:jSuiQ5jEe4SAMH6lLRMY9OVC+TqJLP5655pBGjmnjr0= +github.com/aws/aws-sdk-go-v2/config v1.29.17/go.mod h1:9P4wwACpbeXs9Pm9w1QTh6BwWwJjwYvJ1iCt5QbCXh8= +github.com/aws/aws-sdk-go-v2/credentials v1.17.70 h1:ONnH5CM16RTXRkS8Z1qg7/s2eDOhHhaXVd72mmyv4/0= +github.com/aws/aws-sdk-go-v2/credentials v1.17.70/go.mod h1:M+lWhhmomVGgtuPOhO85u4pEa3SmssPTdcYpP/5J/xc= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.32 h1:KAXP9JSHO1vKGCr5f4O6WmlVKLFFXgWYAGoJosorxzU= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.32/go.mod h1:h4Sg6FQdexC1yYG9RDnOvLbW1a/P986++/Y/a+GyEM8= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.36 h1:SsytQyTMHMDPspp+spo7XwXTP44aJZZAC7fBV2C5+5s= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.36/go.mod h1:Q1lnJArKRXkenyog6+Y+zr7WDpk4e6XlR6gs20bbeNo= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.36 h1:i2vNHQiXUvKhs3quBR6aqlgJaiaexz/aNvdCktW/kAM= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.36/go.mod h1:UdyGa7Q91id/sdyHPwth+043HhmP6yP9MBHgbZM0xo8= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 h1:bIqFDwgGXXN1Kpp99pDOdKMTTb5d2KyU5X/BZxjOkRo= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3/go.mod h1:H5O/EsxDWyU+LP/V8i5sm8cxoZgc2fdNR9bxlOFrQTo= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.34 h1:ZNTqv4nIdE/DiBfUUfXcLZ/Spcuz+RjeziUtNJackkM= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.34/go.mod h1:zf7Vcd1ViW7cPqYWEHLHJkS50X0JS2IKz9Cgaj6ugrs= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.4 h1:CXV68E2dNqhuynZJPB80bhPQwAKqBWVer887figW6Jc= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.4/go.mod h1:/xFi9KtvBXP97ppCz1TAEvU1Uf66qvid89rbem3wCzQ= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.0 h1:lguz0bmOoGzozP9XfRJR1QIayEYo+2vP/No3OfLF0pU= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.0/go.mod h1:iu6FSzgt+M2/x3Dk8zhycdIcHjEFb36IS8HVUVFoMg0= 
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.17 h1:t0E6FzREdtCsiLIoLCWsYliNsRBgyGD/MCK571qk4MI= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.17/go.mod h1:ygpklyoaypuyDvOM5ujWGrYWpAK3h7ugnmKCU/76Ys4= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.15 h1:moLQUoVq91LiqT1nbvzDukyqAlCv89ZmwaHw/ZFlFZg= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.15/go.mod h1:ZH34PJUc8ApjBIfgQCFvkWcUDBtl/WTD+uiYHjd8igA= +github.com/aws/aws-sdk-go-v2/service/s3 v1.79.0 h1:OIw2nryEApESTYI5deCZGcq4Gvz8DBAt4tJlNyg3v5o= +github.com/aws/aws-sdk-go-v2/service/s3 v1.79.0/go.mod h1:U5SNqwhXB3Xe6F47kXvWihPl/ilGaEDe8HD/50Z9wxc= +github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.35.7 h1:d+mnMa4JbJlooSbYQfrJpit/YINaB30JEVgrhtjZneA= +github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.35.7/go.mod h1:1X1NotbcGHH7PCQJ98PsExSxsJj/VWzz8MfFz43+02M= +github.com/aws/aws-sdk-go-v2/service/sso v1.25.5 h1:AIRJ3lfb2w/1/8wOOSqYb9fUKGwQbtysJ2H1MofRUPg= +github.com/aws/aws-sdk-go-v2/service/sso v1.25.5/go.mod h1:b7SiVprpU+iGazDUqvRSLf5XmCdn+JtT1on7uNL6Ipc= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.3 h1:BpOxT3yhLwSJ77qIY3DoHAQjZsc4HEGfMCE4NGy3uFg= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.3/go.mod h1:vq/GQR1gOFLquZMSrxUK/cpvKCNVYibNyJ1m7JrU88E= +github.com/aws/aws-sdk-go-v2/service/sts v1.34.0 h1:NFOJ/NXEGV4Rq//71Hs1jC/NvPs1ezajK+yQmkwnPV0= +github.com/aws/aws-sdk-go-v2/service/sts v1.34.0/go.mod h1:7ph2tGpfQvwzgistp2+zga9f+bCjlQJPkPUmMgDSD7w= +github.com/aws/smithy-go v1.22.4 h1:uqXzVZNuNexwc/xrh6Tb56u89WDlJY6HS+KC0S4QSjw= +github.com/aws/smithy-go v1.22.4/go.mod h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp/HgceI= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= -github.com/cenkalti/backoff/v4 v4.1.3/go.mod 
h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= -github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/checkpoint-restore/go-criu/v5 v5.3.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U= -github.com/containerd/continuity v0.3.0 h1:nisirsYROK15TAMVukJOUyGJjz4BNQJBVsNvAXZJ/eg= -github.com/containerd/continuity v0.3.0/go.mod h1:wJEAIwKOm/pBZuBd0JmeTvnLquTB1Ag8espWhkykbPM= -github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/containerd/continuity v0.4.3 h1:6HVkalIp+2u1ZLH1J/pYX2oBVXlJZvh1X1A7bEZ9Su8= 
+github.com/containerd/continuity v0.4.3/go.mod h1:F6PTNCKepoxEaXLQp3wDAjygEnImnZ/7o4JzpodfroQ= github.com/coreos/go-systemd/v22 v22.3.3-0.20220203105225-a9a7ef127534/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= -github.com/databricks/databricks-sql-go v0.2.2 h1:D4uy8m+KcfUwHYuwGo6Lnq8Z3e6SrHN52gql3eqHQgc= -github.com/databricks/databricks-sql-go v0.2.2/go.mod h1:qKEsB/mPgDM9EvhF2GzivfUEWtSxsM4MFury5GP1U5w= +github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= +github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/databricks/databricks-sql-go v1.0.1-0.20230105210901-69b282787450 h1:j+lJBSJ1xB7/eWDBUNpk56wOJJRlzb8iK+vP0F+AIlQ= github.com/databricks/databricks-sql-go v1.0.1-0.20230105210901-69b282787450/go.mod h1:qKEsB/mPgDM9EvhF2GzivfUEWtSxsM4MFury5GP1U5w= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -84,612 +71,236 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dnephin/pflag v1.0.7 h1:oxONGlWxhmUct0YzKTgrpQv9AUA1wtPBn7zuSjJqptk= github.com/dnephin/pflag v1.0.7/go.mod h1:uxE91IoWURlOiTUIA8Mq5ZZkAv3dPUfZNaT80Zm7OQE= -github.com/docker/cli v20.10.14+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= -github.com/docker/cli v20.10.17+incompatible h1:eO2KS7ZFeov5UJeaDmIs1NFEDRf32PaqRpvoEkKBy5M= -github.com/docker/cli v20.10.17+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= -github.com/docker/docker 
v20.10.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker v20.10.17+incompatible h1:JYCuMrWaVNophQTOrMMoSwudOVEfcegoZZrleKc1xwE= -github.com/docker/docker v20.10.17+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= -github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= -github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= -github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= +github.com/docker/cli v26.1.4+incompatible h1:I8PHdc0MtxEADqYJZvhBrW9bo8gawKwwenxRM7/rLu8= +github.com/docker/cli v26.1.4+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/docker v28.2.2+incompatible h1:CjwRSksz8Yo4+RmQ339Dp/D2tGO5JxwYeqtMOEe0LDw= +github.com/docker/docker v28.2.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= +github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= 
-github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= +github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs= +github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= -github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= -github.com/go-kit/log v0.2.0 h1:7i2K3eKTos3Vc0enKCfnVcgHh2olr/MyfboYq7cAcFw= -github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= -github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA= -github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= -github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= -github.com/go-stack/stack v1.8.0/go.mod 
h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.4/go.mod 
h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk= +github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 
h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= -github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/gorilla/securecookie v1.1.1 h1:miw7JPhV+b/lAHSXz4qd/nN9jRiAFV5FwjeKyCS8BvQ= +github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= +github.com/gorilla/sessions v1.2.1 h1:DHd3rPN5lE3Ts3D8rKkQ8x/0kqfeNmBAaiSi+o7FsgI= +github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-hclog v0.9.2 h1:CG6TE5H9/JXsFWJCfoIVpKFIkFe6ysEuHirp4DxCsHI= github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= github.com/hashicorp/go-retryablehttp v0.7.1 h1:sUiuQAnLlbvmExtFQs72iFW/HXeUn8Z1aJLQ4LJJbTQ= github.com/hashicorp/go-retryablehttp v0.7.1/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= +github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= -github.com/imdario/mergo v0.3.13 
h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= -github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= -github.com/jcmturner/gofork v1.0.0 h1:J7uCkflzTEhUZ64xqKnkDxq3kzc96ajM1Gli5ktUem8= -github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= +github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8= +github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs= +github.com/jcmturner/dnsutils/v2 v2.0.0 h1:lltnkeZGL0wILNvrNiVCR6Ro5PGU/SeBvVO/8c/iPbo= +github.com/jcmturner/dnsutils/v2 v2.0.0/go.mod h1:b0TnjGOvI/n42bZa+hmXL+kFJZsFT7G4t3HTlQ184QM= +github.com/jcmturner/gofork v1.7.6 h1:QH0l3hzAU1tfT3rZCnW5zXl+orbkNMMRGJfdJjHVETg= +github.com/jcmturner/gofork v1.7.6/go.mod h1:1622LH6i/EZqLloHfE7IeZ0uEJwMSUyQ/nDd82IeqRo= +github.com/jcmturner/goidentity/v6 v6.0.1 h1:VKnZd2oEIMorCTsFBnJWbExfNN7yZr3EhJAxwOkZg6o= +github.com/jcmturner/goidentity/v6 v6.0.1/go.mod h1:X1YW3bgtvwAXju7V3LCIMpY0Gbxyjn/mY9zx4tFonSg= +github.com/jcmturner/gokrb5/v8 v8.4.4 h1:x1Sv4HaTpepFkXbt2IkL29DXRf8sOfZXo8eRKh687T8= +github.com/jcmturner/gokrb5/v8 v8.4.4/go.mod h1:1btQEpgT6k+unzCwX1KdWMEwPPkkgBtP+F6aCACiMrs= +github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZY= +github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc= github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= -github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= -github.com/jstemmer/go-junit-report 
v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= -github.com/kardianos/minwinsvc v1.0.0 h1:+JfAi8IBJna0jY2dJGZqi7o15z13JelFIklJCAENALA= -github.com/kardianos/minwinsvc v1.0.0/go.mod h1:Bgd0oc+D0Qo3bBytmNtyRKVlp85dAloLKhfxanPFFRc= -github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= -github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/lib/pq v0.0.0-20180327071824-d34b9ff171c2/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/kardianos/minwinsvc v1.0.2 h1:JmZKFJQrmTGa/WiW+vkJXKmfzdjabuEW4Tirj5lLdR0= +github.com/kardianos/minwinsvc v1.0.2/go.mod h1:LUZNYhNmxujx2tR7FbdxqYJ9XDDoCd3MQcl1o//FWl4= +github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= +github.com/klauspost/compress v1.18.0/go.mod 
h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40= github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= -github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= -github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= -github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA= +github.com/mattn/go-isatty v0.0.19/go.mod 
h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mdlayher/socket v0.4.1 h1:eM9y2/jlbs1M615oshPQOHZzj6R6wMT7bX5NPiQvn2U= +github.com/mdlayher/socket v0.4.1/go.mod h1:cAqeGjoufqdxWkD7DkpyS+wcefOtmu5OQ8KuoJGIReA= +github.com/mdlayher/vsock v1.2.1 h1:pC1mTJTvjo1r9n9fbm7S1j04rCgCzhCOS5DY0zqHlnQ= +github.com/mdlayher/vsock v1.2.1/go.mod h1:NRfCibel++DgeMD8z/hP+PPTjlNJsdPOmxcnENvE+SE= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU= -github.com/moby/term v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc= -github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 h1:dcztxKSvZ4Id8iPpHERQBbIJfabdt4wUm5qy3wOL2Zc= -github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw= -github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= +github.com/moby/term v0.5.0 
h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= +github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= -github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM= -github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/runc v1.1.2/go.mod h1:Tj1hFw6eFWp/o33uxGf5yF2BX5yz2Z6iptFpuvbbKqc= -github.com/opencontainers/runc v1.1.3 h1:vIXrkId+0/J2Ymu2m7VjGvbSlAId9XNRPhn2p4b+d8w= -github.com/opencontainers/runc v1.1.3/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg= -github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= -github.com/ory/dockertest/v3 v3.9.1 h1:v4dkG+dlu76goxMiTT2j8zV7s4oPPEppKT8K8p2f1kY= -github.com/ory/dockertest/v3 v3.9.1/go.mod h1:42Ir9hmvaAPm0Mgibk6mBPi7SFvTXxEcnztDYOJ//uM= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= +github.com/opencontainers/image-spec v1.1.1/go.mod 
h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M= +github.com/opencontainers/runc v1.1.13 h1:98S2srgG9vw0zWcDpFMn5TRrh8kLxa/5OFUstuUhmRs= +github.com/opencontainers/runc v1.1.13/go.mod h1:R016aXacfp/gwQBYw2FDGa9m+n6atbLWrYY8hNMT/sA= +github.com/ory/dockertest/v3 v3.11.0 h1:OiHcxKAvSDUwsEVh2BjxQQc/5EHz9n0va9awCtNGuyA= +github.com/ory/dockertest/v3 v3.11.0/go.mod h1:VIPxS1gwT9NpPOrfD3rACs8Y9Z7yhzO4SB194iUDnUI= +github.com/pierrec/lz4 v2.6.1+incompatible h1:9UY3+iC23yxF0UfGaYrGplQ+79Rg+h/q9FV9ix19jjM= +github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= -github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= -github.com/prometheus/client_golang v1.13.0 h1:b71QUfeo5M8gq2+evJdTPfZhYMAU0uKPkyPJ7TPsloU= -github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod 
h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= -github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= -github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE= -github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= -github.com/prometheus/exporter-toolkit v0.7.1 h1:c6RXaK8xBVercEeUQ4tRNL8UGWzDHfvj9dseo1FcK1Y= -github.com/prometheus/exporter-toolkit v0.7.1/go.mod h1:ZUBIj498ePooX9t/2xtDjeQYwvRpiPP2lh5u4iblj2g= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo= -github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/prometheus/client_golang 
v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q= +github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0= +github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= +github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= +github.com/prometheus/common v0.65.0 h1:QDwzd+G1twt//Kwj/Ww6E9FQq1iVMmODnILtW1t2VzE= +github.com/prometheus/common v0.65.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8= +github.com/prometheus/exporter-toolkit v0.14.0 h1:NMlswfibpcZZ+H0sZBiTjrA3/aBFHkNZqE+iCj5EmRg= +github.com/prometheus/exporter-toolkit v0.14.0/go.mod h1:Gu5LnVvt7Nr/oqTBUC23WILZepW0nffNo10XdhQcwWA= +github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= +github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/rs/xid v1.4.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= github.com/rs/zerolog v1.28.0 h1:MirSo27VyNi7RJYP3078AA1+Cyzd2GB66qy3aUHvsWY= github.com/rs/zerolog v1.28.0/go.mod h1:NILgTygv/Uej1ra5XxGf82ZFSLk58MFGAUS2o6usyD0= -github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= -github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= -github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/simonpasquier/klog-gokit/v3 v3.1.0 h1:xQGqjZdgo1lFA4eZ9PcGnKKXgIPz9t+jc25q/fXooIE= -github.com/simonpasquier/klog-gokit/v3 v3.1.0/go.mod h1:+WRhGy707Lp2Q4r727m9Oc7FxazOHgW76FIyCr23nus= 
-github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= -github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= -github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sethvargo/go-envconfig v1.3.0 h1:gJs+Fuv8+f05omTpwWIu6KmuseFAXKrIaOZSh8RMt0U= +github.com/sethvargo/go-envconfig v1.3.0/go.mod h1:JLd0KFWQYzyENqnEPWWZ49i4vzZo/6nRidxI8YvGiHw= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= -github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= 
-github.com/trinodb/trino-go-client v0.308.0 h1:JXO1Kt8XktqCG5cuFmArqlwz1OiBAYHhNm8cggn12vI= -github.com/trinodb/trino-go-client v0.308.0/go.mod h1:b3wyshZj60DHd7JsULwPvaq+JD6e3v+tQugVKZ+SqBw= -github.com/trinodb/trino-go-client v0.309.0 h1:6TJDdUE69kcEmhv5dcY4IuJJ0MQDtjBrc/08T29SFbg= -github.com/trinodb/trino-go-client v0.309.0/go.mod h1:b3wyshZj60DHd7JsULwPvaq+JD6e3v+tQugVKZ+SqBw= -github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= -github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= -github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/trinodb/trino-go-client v0.326.0 h1:YBTww/DACsNFIBFh9SfFra3Q/3H9Cs/dnCkWoIYjMZk= +github.com/trinodb/trino-go-client v0.326.0/go.mod h1:e/nck9W6hy+9bbyZEpXKFlNsufn3lQGpUgDL1d5f1FI= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= github.com/xeipuuv/gojsonschema v1.2.0/go.mod 
h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= -github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto 
v0.0.0-20220919173607-35f4265a4bc0 h1:a5Yg6ylndHHYJqIPrdq0AhvR6KTvDTAvgBtaidhEevY= -golang.org/x/crypto v0.0.0-20220919173607-35f4265a4bc0/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod 
h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= -golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= +golang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM= +golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net 
v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/mod v0.25.0 h1:n7a+ZbQKQA/Ysbyb0/6IbB1H/X41mKgbhfv7AfG/44w= +golang.org/x/mod v0.25.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net 
v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220706163947-c90051bbdb60/go.mod 
h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20220920203100-d0c6ba3f52d9 h1:asZqf0wXastQr+DudYagQS8uBO8bHKeYD1vbAvGmFL8= -golang.org/x/net v0.0.0-20220920203100-d0c6ba3f52d9/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b h1:clP8eMhB30EHdc0bd2Twtq6kgU7yl5ub2cQLSdrv1Dg= -golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw= +golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA= 
+golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= +golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f h1:Ax0t5p6N38Ga0dThY21weqDEyz2oklo4IvDkpigvkD8= golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.15.0 h1:KWH3jNZsfyT6xfAfKiz6MRNmd46ByHDYaZ7KSkCtdW8= +golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220405210540-1e041c57c461/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220708085239-5a0f0661e09d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220919091848-fb04ddd9f9c8 h1:h+EGohizhe9XlX18rfpa8k8RAc5XyaeamM+0VHRd4lc= -golang.org/x/sys 
v0.0.0-20220919091848-fb04ddd9f9c8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= +golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.0.0-20220526004731-065cf7ba2467 h1:CBpWXWQpIRjzmkkA+M7q9Fqnwd2mZr3AFqexg8YTfoM= golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.32.0 h1:DR4lr0TjUs3epypdhTOkMmuF5CDFJ/8pOnbzMZPQ7bg= +golang.org/x/term v0.32.0/go.mod h1:uZG1FhGx848Sqfsq4/DlJr3xGGsYMu/L5GW4abiaEPQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= 
-golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M= +golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= 
-golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools 
v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= -golang.org/x/tools v0.1.11 h1:loJ25fNOEhSXfHrpoGj91eCUThwdNX6u24rO1xnNteY= golang.org/x/tools v0.1.11/go.mod h1:SgwaegtQh8clINPpECJMqnxLv9I09HLqnW3RMqW0CA4= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.33.0 h1:4qz2S3zmRxbGIhDIAgjxvFutSvH5EfnsYrRBj0UI0bc= +golang.org/x/tools v0.33.0/go.mod h1:CIJMaWEY88juyUfo7UbgPqbC8rU2OqfAV1h2Qp0oMYI= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= -google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6 h1:lMO5rYAqUxkmaj76jAkRUvt5JZgFymx/+Q5Mzfivuhc= -google.golang.org/appengine 
v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= -google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto 
v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.26.0/go.mod 
h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= 
-google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= +google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/jcmturner/aescts.v1 v1.0.1 h1:cVVZBK2b1zY26haWB4vbBiZrfFQnfbTVrE3xZq6hrEw= -gopkg.in/jcmturner/aescts.v1 v1.0.1/go.mod h1:nsR8qBOg+OucoIW+WMhB3GspUQXq9XorLnQb9XtvcOo= -gopkg.in/jcmturner/dnsutils.v1 v1.0.1 h1:cIuC1OLRGZrld+16ZJvvZxVJeKPsvd5eUIvxfoN5hSM= -gopkg.in/jcmturner/dnsutils.v1 v1.0.1/go.mod h1:m3v+5svpVOhtFAP/wSz+yzh4Mc0Fg7eRhxkJMWSIz9Q= -gopkg.in/jcmturner/goidentity.v3 v3.0.0 h1:1duIyWiTaYvVx3YX2CYtpJbUFd7/UuPYCfgXtQ3VTbI= -gopkg.in/jcmturner/goidentity.v3 v3.0.0/go.mod h1:oG2kH0IvSYNIu80dVAyu/yoefjq1mNfM5bm88whjWx4= -gopkg.in/jcmturner/gokrb5.v6 v6.1.1 h1:n0KFjpbuM5pFMN38/Ay+Br3l91netGSVqHPHEXeWUqk= -gopkg.in/jcmturner/gokrb5.v6 v6.1.1/go.mod h1:NFjHNLrHQiruory+EmqDXCGv6CrjkeYeA+bR9mIfNFk= -gopkg.in/jcmturner/rpc.v1 v1.1.0 h1:QHIUxTX1ISuAv9dD2wJ9HWQVuWDX/Zc0PfeC2tjc4rU= -gopkg.in/jcmturner/rpc.v1 v1.1.0/go.mod h1:YIdkC4XfD6GXbzje11McwsDuOlZQSb9W4vfLvuNnlv8= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod 
h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools/gotestsum v1.8.2 h1:szU3TaSz8wMx/uG+w/A2+4JUPwH903YYaMI9yOOYAyI= gotest.tools/gotestsum v1.8.2/go.mod h1:6JHCiN6TEjA7Kaz23q1bH0e2Dc3YJjDUZ0DmctFZf+w= -gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= -gotest.tools/v3 v3.2.0/go.mod h1:Mcr9QNxkg0uMvy/YElmo4SpXgJKWgQvYrT7Kw5RzJ1A= gotest.tools/v3 v3.3.0 h1:MfDY1b1/0xN1CyMlQDac0ziEy9zJQd9CXBRRDHw2jJo= gotest.tools/v3 v3.3.0/go.mod h1:Mcr9QNxkg0uMvy/YElmo4SpXgJKWgQvYrT7Kw5RzJ1A= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -rsc.io/binaryregexp v0.2.0/go.mod 
h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= -rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/helm/.gitignore b/helm/.gitignore new file mode 100644 index 00000000..ba077a40 --- /dev/null +++ b/helm/.gitignore @@ -0,0 +1 @@ +bin diff --git a/helm/.helmignore b/helm/.helmignore new file mode 100644 index 00000000..cbc3a39a --- /dev/null +++ b/helm/.helmignore @@ -0,0 +1,35 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. + +.DS_Store + +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ + +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ + +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ + +# Dir with local binaries +bin + +# Development files (including CI) +ci/ +README.md.gotmpl +Makefile diff --git a/helm/Chart.yaml b/helm/Chart.yaml new file mode 100644 index 00000000..4b112928 --- /dev/null +++ b/helm/Chart.yaml @@ -0,0 +1,18 @@ +apiVersion: v2 +name: sql-exporter +description: Database-agnostic SQL exporter for Prometheus +type: application +version: 0.12.4 +appVersion: 0.18.0 +keywords: + - exporter + - servicemonitor + - sql + - metrics +home: https://github.com/burningalchemist/sql_exporter +sources: + - https://github.com/burningalchemist/sql_exporter +maintainers: + - name: Nikolai Rodionov + email: allanger@zohomail.com + url: https://badhouseplants.net diff --git a/helm/Makefile b/helm/Makefile new file mode 100644 index 00000000..6ea48ae8 --- /dev/null +++ b/helm/Makefile @@ -0,0 +1,8 @@ +LOCALBIN ?= $(shell pwd)/bin +$(LOCALBIN): + mkdir -p $(LOCALBIN) + +.PHONY: gen_docs +gen_docs: ## Generate helm documentation + test -s $(LOCALBIN)/setup-envtest || GOBIN=$(LOCALBIN) go install github.com/norwoodj/helm-docs/cmd/helm-docs@latest + ./bin/helm-docs 
--template-files=./README.md.gotmpl --sort-values-order file diff --git a/helm/README.md b/helm/README.md new file mode 100644 index 00000000..e87a2d7a --- /dev/null +++ b/helm/README.md @@ -0,0 +1,125 @@ +# sql-exporter + +![Version: 0.12.4](https://img.shields.io/badge/Version-0.12.4-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 0.18.0](https://img.shields.io/badge/AppVersion-0.18.0-informational?style=flat-square) + +Database-agnostic SQL exporter for Prometheus + +## Source Code + +* + +## Maintainers + +| Name | Email | Url | +| ---- | ------ | --- | +| Nikolai Rodionov | | | + +## Installing the Chart + +To install the chart with the release name `sql-exporter`: + +```console +helm repo add sql_exporter https://burningalchemist.github.io/sql_exporter/ +helm install sql-exporter sql_exporter/sql-exporter +``` + +### Ingress support + +It's possible to enable the ingress creation by setting + +```yaml +#Values +ingress: + enabled: true +``` + +But as the sql_exporter has a direct connection to databases, +it might expose the database servers to possible DDoS attacks. +It's not recommended by maintainers to use ingress for accessing the exporter, +but if there are no other options, +security measures should be taken. + +For example, a user might enable the basic auth on the ingress level. +Take a look at how it's done in the +[nginx ingress controller](https://kubernetes.github.io/ingress-nginx/examples/auth/basic/) +as an example. 
+ +## Chart Values + +### General parameters + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| nameOverride | string | `""` | Provide a name in place of `sql-exporter` | +| fullnameOverride | string | `""` | String to fully override "sql-exporter.fullname" | +| image.repository | string | `"burningalchemist/sql_exporter"` | Image repository | +| image.pullPolicy | string | `"IfNotPresent"` | Image pull policy | +| image.tag | string | `appVersion` value from `Chart.yaml` | Image tag | +| imagePullSecrets | list | `[]` | Secrets with credentials to pull images from a private registry | +| service.type | string | `"ClusterIP"` | Service type | +| service.port | int | `80` | Service port | +| service.labels | object | `{}` | Service labels | +| service.annotations | object | `{}` | Service annotations | +| ingress.enabled | bool | `false` | | +| ingress.labels | object | `{}` | Ingress labels | +| ingress.annotations | object | `{}` | Ingress annotations | +| ingress.ingressClassName | string | `""` | Ingress class name | +| ingress.host | string | `""` | Ingress host | +| ingress.path | string | `"/"` | Ingress path | +| ingress.tls | object | `{"crt":"","enabled":false,"key":"","secretName":""}` | Ingress TLS, can be defined by cert secret, or by key and cert. | +| ingress.tls.secretName | string | `""` | Ingress tls secret if already exists. | +| ingress.tls.crt | string | `""` | Ingress tls.crt, required if you don't have secret name. | +| ingress.tls.key | string | `""` | Ingress tls.key, required if you don't have secret name. | +| extraContainers | object | `{}` | Arbitrary sidecar containers list | +| initContainers | object | `{}` | Arbitrary sidecar containers list for 1.29+ kubernetes | +| serviceAccount.create | bool | `true` | Specifies whether a Service Account should be created, creates "sql-exporter" service account if true, unless overridden. 
Otherwise, set to `default` if false, and custom service account name is not provided. Check all the available parameters. | +| serviceAccount.annotations | object | `{}` | Annotations to add to the Service Account | +| livenessProbe.initialDelaySeconds | int | `5` | | +| livenessProbe.timeoutSeconds | int | `30` | | +| readinessProbe.initialDelaySeconds | int | `5` | | +| readinessProbe.timeoutSeconds | int | `30` | | +| resources | object | `{}` | Resource limits and requests for the application controller pods | +| podLabels | object | `{}` | Pod labels | +| podAnnotations | object | `{}` | Pod annotations | +| podSecurityContext | object | `{}` | Pod security context | +| createConfig | bool | `true` | Set to true to create a config as a part of the helm chart | +| logLevel | string | `"debug"` | Set log level (info if unset) | +| logFormat | string | `"logfmt"` | Set log format (logfmt if unset) | +| reloadEnabled | bool | `false` | Enable reload collector data handler (endpoint /reload) | + +### Prometheus ServiceMonitor + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| serviceMonitor.enabled | bool | `true` | Enable ServiceMonitor | +| serviceMonitor.interval | string | `"15s"` | ServiceMonitor interval | +| serviceMonitor.path | string | `"/metrics"` | ServiceMonitor path | +| serviceMonitor.metricRelabelings | object | `{}` | ServiceMonitor metric relabelings | +| serviceMonitor.relabelings | object | `{}` | ServiceMonitor relabelings | +| serviceMonitor.namespace | string | `nil` | ServiceMonitor namespace override (default is .Release.Namespace) | +| serviceMonitor.scrapeTimeout | string | `nil` | ServiceMonitor scrape timeout | + +### Configuration + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| config | object | `{"global":{"max_connections":3,"max_idle_connections":3,"min_interval":"0s","scrape_error_drop_interval":"0s","scrape_timeout":"10s","scrape_timeout_offset":"500ms"}}` | 
SQL Exporter configuration, can be a dictionary, or a template yaml string. | +| config.global.scrape_timeout | string | `"10s"` | Scrape timeout | +| config.global.scrape_timeout_offset | string | `"500ms"` | Scrape timeout offset. Must be strictly positive. | +| config.global.scrape_error_drop_interval | string | `"0s"` | Interval between dropping scrape_errors_total metric: by default the metric is persistent. | +| config.global.min_interval | string | `"0s"` | Minimum interval between collector runs. | +| config.global.max_connections | int | `3` | Number of open connections. | +| config.global.max_idle_connections | int | `3` | Number of idle connections. | +| target | object | `nil` | Check documentation. Mutually exclusive with `jobs` | +| jobs | list | `nil` | Check documentation. Mutually exclusive with `target` | +| collector_files | list | `[]` | Check documentation | + +To generate the config as a part of a helm release, please set the `.Values.createConfig` to true, and define a config under the `.Values.config` property. + +To configure `target`, `jobs`, `collector_files` please refer to the [documentation](https://github.com/burningalchemist/sql_exporter/blob/master/documentation/sql_exporter.yml) in the source repository. These values are not set by default. + +It's also possible to define collectors (i.e. metrics and queries) in separate files, and specify the filenames in the `collector_files` list. For that we can use `CollectorFiles` field (check `values.yaml` for the available example). + +## Dev Notes + +After changing default `Values`, please execute `make gen_docs` to update the `README.md` file. Readme file is generated by the `helm-docs` tool, so make sure not to edit it manually. diff --git a/helm/README.md.gotmpl b/helm/README.md.gotmpl new file mode 100644 index 00000000..53ce2c84 --- /dev/null +++ b/helm/README.md.gotmpl @@ -0,0 +1,90 @@ +{{ template "chart.header" . }} +{{ template "chart.deprecationWarning" . 
}} + +{{ template "chart.badgesSection" . }} + +{{ template "chart.description" . }} + +{{ template "chart.sourcesSection" . }} + +{{ template "chart.maintainersSection" . }} + +{{ template "chart.requirementsSection" . }} + + +## Installing the Chart + +To install the chart with the release name `sql-exporter`: + +```console +helm repo add sql_exporter https://burningalchemist.github.io/sql_exporter/ +helm install sql_exporter/sql-exporter +``` + +### Ingress support + +It's possible to enable the ingress creation by setting + +```yaml +#Values +ingress: + enabled: true +``` + +But as the sql_exporter has a direct connection to databases, +it might expose the database servers to possible DDoS attacks. +It's not recommended by maintainers to use ingress for accessing the exporter, +but if there are no other options, +security measures should be taken. + +For example, a user might enable the basic auth on the ingress level. +Take a look at how it's done at the +[nginx ingress controller](https://kubernetes.github.io/ingress-nginx/examples/auth/basic/) +as an example. 
+ +## Chart Values + +### General parameters + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +{{- range .Values }} +{{- if not (or (hasPrefix "serviceMonitor" .Key) (hasPrefix "config" .Key) (hasPrefix "collectorFiles" .Key))}} +| {{ .Key }} | {{ .Type }} | {{ if .Default }}{{ .Default }}{{ else }}{{ .AutoDefault }}{{ end }} | {{ if .Description }}{{ .Description }}{{ else }}{{ .AutoDescription }}{{ end }} | + {{- end }} +{{- end }} + + +### Prometheus ServiceMonitor + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +{{- range .Values }} + {{- if hasPrefix "serviceMonitor" .Key }} +| {{ .Key }} | {{ .Type }} | {{ if .Default }}{{ .Default }}{{ else }}{{ .AutoDefault }}{{ end }} | {{ if .Description }}{{ .Description }}{{ else }}{{ .AutoDescription }}{{ end }} | + {{- end }} +{{- end }} +| serviceMonitor.scrapeTimeout | string | `nil` | ServiceMonitor scrape timeout | + +### Configuration + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +{{- range .Values }} + {{- if or (hasPrefix "config" .Key) }} +| {{ .Key }} | {{ .Type }} | {{ if .Default }}{{ .Default }}{{ else }}{{ .AutoDefault }}{{ end }} | {{ if .Description }}{{ .Description }}{{ else }}{{ .AutoDescription }}{{ end }} | + {{- end }} +{{- end }} +| target | object | `nil` | Check documentation. Mutually exclusive with `jobs` | +| jobs | list | `nil` | Check documentation. Mutually exclusive with `target` | +| collector_files | list | `[]` | Check documentation | + +To generate the config as a part of a helm release, please set the `.Values.createConfig` to true, and define a config under the `.Values.config` property. + +To configure `target`, `jobs`, `collector_files` please refer to the [documentation](https://github.com/burningalchemist/sql_exporter/blob/master/documentation/sql_exporter.yml) in the source repository. These values are not set by default. + +It's also possible to define collectors (i.e. 
metrics and queries) in separate files, and specify the filenames in the `collector_files` list. For that we can use `CollectorFiles` field (check `values.yaml` for the available example). + +## Dev Notes + +After changing default `Values`, please execute `make gen_docs` to update the `README.md` file. Readme file is generated by the `helm-docs` tool, so make sure not to edit it manually. diff --git a/helm/ci/helmfile.yaml b/helm/ci/helmfile.yaml new file mode 100644 index 00000000..ca5942dd --- /dev/null +++ b/helm/ci/helmfile.yaml @@ -0,0 +1,37 @@ +repositories: + - name: bitnami + url: https://charts.bitnami.com/bitnami + - name: prometheus-community + url: https://prometheus-community.github.io/helm-charts + +releases: + - name: postgres-instance + installed: true + namespace: postgres + createNamespace: true + chart: bitnami/postgresql + values: + - global: + postgresql: + auth: + postgresPassword: 123123!! + - name: prometheus-stack + namespace: monitoring + createNamespace: true + chart: prometheus-community/kube-prometheus-stack + values: + - prometheus: + prometheusSpec: + enableAdminAPI: true + podMonitorNamespaceSelector: + any: true + podMonitorSelector: {} + podMonitorSelectorNilUsesHelmValues: false + ruleNamespaceSelector: + any: true + ruleSelector: {} + ruleSelectorNilUsesHelmValues: false + serviceMonitorNamespaceSelector: + any: true + serviceMonitorSelector: {} + serviceMonitorSelectorNilUsesHelmValues: false diff --git a/helm/ci/postgresql-values.yaml b/helm/ci/postgresql-values.yaml new file mode 100644 index 00000000..00dba118 --- /dev/null +++ b/helm/ci/postgresql-values.yaml @@ -0,0 +1,66 @@ +tests: + serviceMonitor: + enabled: true + prom: + service: prometheus-operated + namespace: monitoring + metricsEndpoint: + enabled: true +service: + labels: + deployment: ci + annotations: + prometheus.io/scrape: "true" + +podLabels: + test-label: test-value +podAnnotations: + test/annotation: test-value + +config: + target: + data_source_name: 
'postgresql://postgres:123123!!@postgres-instance-postgresql.postgres.svc.cluster.local:5432?sslmode=disable' + collectors: [active_connections] + collectors: + - collector_name: active_connections + metrics: + - metric_name: active_connections + type: gauge + help: 'Active connections' + key_labels: + - "datname" + - "usename" + - "state" + values: + - "count" + query_ref: active_connections + queries: + - query_name: active_connections + query: | + SELECT + datname::text, + usename::text, + state::text, + COUNT(state)::float AS count + FROM pg_stat_activity + GROUP BY datname, usename, state; + collector_files: + - "*.collector.yml" +collectorFiles: + pricing_data_freshness.collector.yml: + collector_name: pricing_data_freshness + metrics: + - metric_name: pricing_update_time + type: gauge + help: 'Time when prices for a market were last updated.' + key_labels: + # Populated from the `market` column of each row. + - Market + static_labels: + # Arbitrary key/value pair + portfolio: income + values: [LastUpdateTime] + query: | + SELECT Market, max(UpdateTime) AS LastUpdateTime + FROM MarketPrices + GROUP BY Market diff --git a/helm/templates/NOTES.txt b/helm/templates/NOTES.txt new file mode 100644 index 00000000..5596a167 --- /dev/null +++ b/helm/templates/NOTES.txt @@ -0,0 +1,26 @@ +{{- $conf := include "sql_exporter.config.yaml" . | fromYaml -}} +------------------------------ +Hello there! 
+ +{{- if and (not $conf.target ) (not $conf.jobs)}} +------------------------------ + +It seems like you haven't configured the target, please check the example here: + + https://github.com/burningalchemist/sql_exporter/blob/master/documentation/sql_exporter.yml#L30 + +In case you need to have multiple targets, you can configure jobs instead, have a look here + + https://github.com/burningalchemist/sql_exporter#multiple-database-connections + +{{- end}} + +{{- if and (not $conf.collectors) (not $conf.collectorFiles)}} + +------------------------------ +You need to configure either collectors or collectorFiles (or both), please have a look at the example here: + + https://github.com/burningalchemist/sql_exporter#multiple-database-connections + +{{- end }} + diff --git a/helm/templates/_helpers.tpl b/helm/templates/_helpers.tpl new file mode 100644 index 00000000..46543999 --- /dev/null +++ b/helm/templates/_helpers.tpl @@ -0,0 +1,93 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "sql-exporter.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "sql-exporter.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. 
+*/}} +{{- define "sql-exporter.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create tls secret name based on the chart name +*/}} +{{- define "sql-exporter.tls.name" -}} +{{- if ((.Values.ingress).tls).secretName -}} +{{- .Values.ingress.tls.secretName }} +{{- else -}} +{{- printf "%s-%s" (include "sql-exporter.fullname" .) "tls" }} +{{- end -}} +{{- end -}} + +{{/* +Common labels +*/}} +{{- define "sql-exporter.labels" -}} +helm.sh/chart: {{ include "sql-exporter.chart" . }} +{{ include "sql-exporter.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "sql-exporter.selectorLabels" -}} +app.kubernetes.io/name: {{ include "sql-exporter.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "sql-exporter.serviceAccountName" -}} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} + +{{- define "sql-exporter.volumes" -}} +{{- if or .Values.createConfig .Values.collectorFiles -}} +{{- true | quote -}} +{{- else if .Values.extraVolumes -}} +{{- true | quote -}} +{{- else -}} +{{- false | quote -}} +{{- end -}} +{{- end -}} + +{{- define "sql_exporter.config.yaml" -}} +{{- $conf := "" -}} +{{- if typeIsLike "string" .Values.config -}} +{{- $conf = (tpl .Values.config .) | fromYaml -}} +{{- else -}} +{{- $conf = .Values.config -}} +{{- end -}} +{{- /* +Do the wired "fromYaml | toYaml" to reformat the config. +Reformat '100s' to 100s for example. +*/ -}} +{{- tpl ($conf | toYaml ) . 
| fromYaml | toYaml -}} +{{- end -}} diff --git a/helm/templates/configmap.collectors.yaml b/helm/templates/configmap.collectors.yaml new file mode 100644 index 00000000..dedeb155 --- /dev/null +++ b/helm/templates/configmap.collectors.yaml @@ -0,0 +1,20 @@ +{{- if .Values.collectorFiles }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "sql-exporter.fullname" . }} + labels: + {{- include "sql-exporter.labels" . | nindent 4 }} +data: + {{- range $k, $v := .Values.collectorFiles }} + {{ $k }}: |- + {{- if typeIsLike "string" $v -}} + {{- $v = (tpl $v $ | fromYaml) -}} + {{- end -}} + {{- /* + Do the wired "fromYaml | toYaml" to reformat the config. + Reformat '100s' to 100s for example. + */ -}} + {{- tpl (toYaml $v) $ | fromYaml | toYaml | nindent 4}} + {{- end}} +{{- end }} diff --git a/helm/templates/deployment.yaml b/helm/templates/deployment.yaml new file mode 100644 index 00000000..0e84e8d5 --- /dev/null +++ b/helm/templates/deployment.yaml @@ -0,0 +1,140 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "sql-exporter.fullname" . }} + labels: + {{- include "sql-exporter.labels" . | nindent 4 }} +spec: + replicas: {{ .Values.replicaCount }} + selector: + matchLabels: + {{- include "sql-exporter.selectorLabels" . | nindent 6 }} + template: + metadata: + annotations: + checksum/config: {{ include (print $.Template.BasePath "/secret.configuration.yaml") . | sha256sum }} + checksum/collectors: {{ include (print $.Template.BasePath "/configmap.collectors.yaml") . | sha256sum }} + {{- with .Values.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "sql-exporter.selectorLabels" . | nindent 8 }} + {{- with .Values.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . 
| nindent 8 }} + {{- end }} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} + serviceAccountName: {{ if .Values.serviceAccount.create }}{{ template "sql-exporter.fullname" . }}{{ else }}{{ include "sql-exporter.serviceAccountName" . }}{{end}} + {{- if eq (include "sql-exporter.volumes" .) "\"true\"" }} + volumes: + {{- if .Values.createConfig }} + - name: sql-exporter + secret: + secretName: {{ include "sql-exporter.fullname" . }} + {{- end }} + {{- if .Values.collectorFiles }} + - name: sql-collector + configMap: + name: {{ include "sql-exporter.fullname" . }} + {{- end }} + {{- end }} + {{- range $v := .Values.extraVolumes }} + - name: {{ $v.name }} + {{- toYaml $v.volume | nindent 10 }} + {{- end }} +{{- if .Values.initContainers }} + initContainers: +{{ toYaml .Values.initContainers | nindent 8 }} +{{- end }} + containers: + - name: {{ .Chart.Name }} + securityContext: + {{- toYaml .Values.securityContext | nindent 12 }} + image: "{{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: + - "-config.file=/etc/sql_exporter/sql_exporter.yml" + - "-log.level={{ .Values.logLevel | default "info" }}" + - "-log.format={{ .Values.logFormat | default "logfmt" }}" + {{- if .Values.reloadEnabled }} + - "-web.enable-reload" + {{- end }} + {{- if eq (include "sql-exporter.volumes" .) "\"true\"" }} + volumeMounts: + {{- if .Values.createConfig }} + - name: sql-exporter + readOnly: true + mountPath: /etc/sql_exporter/ + {{- end }} + {{- if .Values.collectorFiles }} + - name: sql-collector + readOnly: true + mountPath: /etc/sql_exporter/collectors/ + {{- end }} + {{- range $v := .Values.extraVolumes }} + - name: {{ $v.name }} + {{- toYaml $v.mount | nindent 12 }} + {{- end }} + {{- end }} + {{- with .Values.envFrom }} + envFrom: + {{- toYaml . 
| nindent 12 }} + {{- end }} + {{- if .Values.env }} + env: + {{- range $key, $value := .Values.env }} + - name: {{ $key }} + {{- if $value.value }} + value: {{ $value.value }} + {{- else }} + valueFrom: + {{- if eq $value.from.kind "Secret" }} + secretKeyRef: + {{- else if eq $value.from.kind "ConfigMap" }} + configMapKeyRef: + {{- else }} + {{- fail "Values.env[].from.kind should be either Secret or ConfigMap" }} + {{- end }} + name: {{ $value.from.name }} + key: {{ $value.from.key }} + {{- end }} + {{- end }} + {{- end }} + livenessProbe: + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + httpGet: + path: /healthz + port: 9399 + readinessProbe: + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + httpGet: + path: /healthz + port: 9399 + ports: + - name: http + containerPort: 9399 + protocol: TCP + resources: + {{- toYaml .Values.resources | nindent 12 }} + {{- with .Values.extraContainers }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} diff --git a/helm/templates/ingress.yaml b/helm/templates/ingress.yaml new file mode 100644 index 00000000..62da85e4 --- /dev/null +++ b/helm/templates/ingress.yaml @@ -0,0 +1,38 @@ +{{- if (.Values.ingress).enabled -}} +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: {{ include "sql-exporter.fullname" . }} + labels: + {{- include "sql-exporter.labels" . | nindent 4 }} + {{- with .Values.ingress.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.ingress.annotations }} + annotations: + {{- toYaml . 
| nindent 4 }} + {{- end }} +spec: + {{- if .Values.ingress.ingressClassName }} + ingressClassName: {{ .Values.ingress.ingressClassName }} + {{- end }} + {{- if (.Values.ingress.tls).enabled }} + tls: + - hosts: + - {{ .Values.ingress.host | required "Ingress host is required if tls is enabled!" }} + secretName: {{ include "sql-exporter.tls.name" . }} + {{- end }} + rules: + - http: + paths: + - path: {{ .Values.ingress.path }} + pathType: Prefix + backend: + service: + name: {{ include "sql-exporter.fullname" . }} + port: + number: {{ .Values.service.port }} + {{- if .Values.ingress.host }} + host: {{ .Values.ingress.host }} + {{- end }} +{{- end -}} diff --git a/helm/templates/secret.configuration.yaml b/helm/templates/secret.configuration.yaml new file mode 100644 index 00000000..33366421 --- /dev/null +++ b/helm/templates/secret.configuration.yaml @@ -0,0 +1,15 @@ +# --------------------------------------------------------------------- +# -- This secret holds the config file of sql_exporter +# --------------------------------------------------------------------- +{{- if .Values.createConfig }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "sql-exporter.fullname" . }} + labels: + {{- include "sql-exporter.labels" . | nindent 4 }} +type: Opaque +stringData: + sql_exporter.yml: |- + {{- include "sql_exporter.config.yaml" . | nindent 4 }} +{{- end }} diff --git a/helm/templates/secret.tls.yaml b/helm/templates/secret.tls.yaml new file mode 100644 index 00000000..42d85390 --- /dev/null +++ b/helm/templates/secret.tls.yaml @@ -0,0 +1,15 @@ +# --------------------------------------------------------------------- +# -- This secret holds the tls key and cert of sql_exporter's ingress +# --------------------------------------------------------------------- +{{- if and (((.Values.ingress).tls).enabled) (not ((.Values.ingress).tls).secretName) -}} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "sql-exporter.tls.name" . 
}} + labels: + {{- include "sql-exporter.labels" . | nindent 4 }} +type: Opaque +data: + tls.crt: {{ (tpl (.Values.ingress.tls.crt | required "crt is required if you want to create tls secret.") .) | required "crt is required if you want to create tls secret." | b64enc | quote }} + tls.key: {{ (tpl (.Values.ingress.tls.key | required "private key is required if you want to create tls secret.") .) | required "private key is required if you want to create tls secret." | b64enc | quote }} +{{- end }} diff --git a/helm/templates/service.yaml b/helm/templates/service.yaml new file mode 100644 index 00000000..c79e4f6f --- /dev/null +++ b/helm/templates/service.yaml @@ -0,0 +1,22 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "sql-exporter.fullname" . }} + labels: + {{- include "sql-exporter.labels" . | nindent 4 }} + {{- with .Values.service.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.service.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + targetPort: http + protocol: TCP + name: http + selector: + {{- include "sql-exporter.selectorLabels" . | nindent 4 }} diff --git a/helm/templates/serviceaccount.yaml b/helm/templates/serviceaccount.yaml new file mode 100644 index 00000000..00654964 --- /dev/null +++ b/helm/templates/serviceaccount.yaml @@ -0,0 +1,16 @@ +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "sql-exporter.fullname" . }} + {{- with .Values.serviceAccount.annotations}} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} + labels: + {{- with .Values.serviceAccount.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- include "sql-exporter.labels" . 
| nindent 4 }} +automountServiceAccountToken: {{ default "false" .Values.serviceAccount.automountServiceAccountToken }} +{{- end }} diff --git a/helm/templates/servicemonitor.yaml b/helm/templates/servicemonitor.yaml new file mode 100644 index 00000000..b0d61a34 --- /dev/null +++ b/helm/templates/servicemonitor.yaml @@ -0,0 +1,46 @@ +{{- if .Values.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "sql-exporter.fullname" . }} + {{- if .Values.serviceMonitor.namespace }} + namespace: {{ .Values.serviceMonitor.namespace }} + {{- else }} + namespace: {{ .Release.Namespace }} + {{- end }} + labels: + {{- include "sql-exporter.labels" . | nindent 4 }} + {{- range $key, $value := .Values.serviceMonitor.selector }} + {{ $key }}: {{ $value | quote }} + {{- end }} +spec: + selector: + matchLabels: + {{- include "sql-exporter.selectorLabels" . | nindent 6 }} + endpoints: + - port: http + {{- if .Values.serviceMonitor.path }} + path: {{ .Values.serviceMonitor.path }} + {{- end }} + {{- if .Values.serviceMonitor.interval }} + interval: {{ .Values.serviceMonitor.interval }} + {{- end }} + {{- if .Values.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.serviceMonitor.metricRelabelings }} + metricRelabelings: + {{- toYaml .Values.serviceMonitor.metricRelabelings | nindent 8 }} + {{- end }} + {{- if .Values.serviceMonitor.relabelings }} + relabelings: + {{- toYaml .Values.serviceMonitor.relabelings | nindent 8 }} + {{- end }} + namespaceSelector: + matchNames: + {{- if .Values.serviceMonitor.namespace }} + - {{ .Values.serviceMonitor.namespace }} + {{- else }} + - {{ .Release.Namespace }} + {{- end }} +{{- end }} diff --git a/helm/templates/tests/test-connection.yaml b/helm/templates/tests/test-connection.yaml new file mode 100644 index 00000000..f77d13c6 --- /dev/null +++ b/helm/templates/tests/test-connection.yaml @@ -0,0 +1,50 @@ +{{- if 
(((.Values.tests).metricsEndpoint).enabled) }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "sql-exporter.fullname" . }}-test-script + labels: + {{- include "sql-exporter.labels" . | nindent 4 }} + annotations: + "helm.sh/hook": test + "helm.sh/hook-weight": "1" + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded +data: + test.sh: |- + #! /bin/sh + STATUS=$(curl {{ include "sql-exporter.fullname" . }}:80/metrics --head -s | awk '/^HTTP/{print $2}') + if [ "$STATUS" != 200 ]; then + echo "sql-exporter didn't return code 200, probably something is broken" + exit 1; + fi + echo "metrics endpoint returned 200" +--- +apiVersion: v1 +kind: Pod +metadata: + name: "{{ include "sql-exporter.fullname" . }}-test-connection" + labels: + {{- include "sql-exporter.labels" . | nindent 4 }} + annotations: + "helm.sh/hook": test + "helm.sh/hook-weight": "2" + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded +spec: + volumes: + - name: test-script + configMap: + name: "{{ include "sql-exporter.fullname" . }}-test-script" + containers: + - name: check-metrics-endpoint + image: alpine/curl + command: ['sh'] + volumeMounts: + - name: test-script + readOnly: true + mountPath: /test.sh + subPath: test.sh + args: + - /test.sh + restartPolicy: Never +{{- end }} diff --git a/helm/templates/tests/test-servicemonitor.yaml b/helm/templates/tests/test-servicemonitor.yaml new file mode 100644 index 00000000..7789f543 --- /dev/null +++ b/helm/templates/tests/test-servicemonitor.yaml @@ -0,0 +1,64 @@ +{{- if (((.Values.tests).serviceMonitor).enabled) }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "sql-exporter.fullname" . }}-test-prom-script + labels: + {{- include "sql-exporter.labels" . | nindent 4 }} + annotations: + "helm.sh/hook": test + "helm.sh/hook-weight": "1" + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded +data: + test.sh: |- + #! 
/bin/sh + # ----------------------------------------------------------------- + # -- JQ is required for this test, but since the jq image doesn't + # -- have curl installed, it's not an option to use it. + # -- Also, it doesn't have any shell installed, so we can't use + # -- it to copy the binary to an emptydir. + # -- That's why I'm using apk add here + # ----------------------------------------------------------------- + apk update && apk add jq + sleep {{ .Values.serviceMonitor.interval }} + URL="{{.Values.tests.serviceMonitor.prom.service}}.{{.Values.tests.serviceMonitor.prom.namespace}}.svc.cluster.local" + CURL_RES=$(curl -q "http://${URL}:9090/api/v1/query?query=active_connections") + STATUS=$(echo "$CURL_RES" | jq -r '.status') + if [ "$STATUS" != "success" ]; then + echo "metric doesn't have a status 'success' in the prometheus" + echo "curl output is: $CURL_RES" + exit 1; + fi + echo "Prometheus rerturns success for the sql-exporter metric" + +--- +apiVersion: v1 +kind: Pod +metadata: + name: "{{ include "sql-exporter.fullname" . }}-test-prom" + labels: + {{- include "sql-exporter.labels" . | nindent 4 }} + annotations: + "helm.sh/hook": test + "helm.sh/hook-weight": "2" + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded +spec: + volumes: + - name: test-script + configMap: + name: "{{ include "sql-exporter.fullname" . 
}}-test-prom-script" + containers: + - name: check-metrics-endpoint + image: alpine/curl + command: + - sh + volumeMounts: + - name: test-script + readOnly: true + mountPath: /test.sh + subPath: test.sh + args: + - /test.sh + restartPolicy: Never +{{- end}} diff --git a/helm/values.yaml b/helm/values.yaml new file mode 100644 index 00000000..df116d93 --- /dev/null +++ b/helm/values.yaml @@ -0,0 +1,223 @@ +# -- Provide a name in place of `sql-exporter` +nameOverride: "" +# -- String to fully override "sql-exporter.fullname" +fullnameOverride: "" +image: + # -- Image repository + repository: burningalchemist/sql_exporter + # -- Image pull policy + pullPolicy: IfNotPresent + # -- Image tag + # @default -- `appVersion` value from `Chart.yaml` + tag: "" +# -- Secrets with credentials to pull images from a private registry +imagePullSecrets: [] +service: + # -- Service type + type: ClusterIP + # -- Service port + port: 80 + # -- Service labels + labels: {} + # -- Service annotations + annotations: {} + # example of prometheus usage + # prometheus.io/scrape: "true" + # prometheus.io/path: "/metrics" +ingress: + enabled: false + # -- Ingress labels + labels: {} + # -- Ingress annotations + annotations: {} + # -- Ingress class name + ingressClassName: "" + # -- Ingress host + host: "" + # -- Ingress path + path: "/" + # -- Ingress TLS, can be defined by cert secret, or by key and cert. + tls: + enabled: false + # -- Ingress tls secret if already exists. + secretName: "" + # -- Ingress tls.crt, required if you don't have secret name. + crt: "" + # crt: "{{- .Files.Get \"tls.crt\" -}}" + # -- Ingress tls.key, required if you don't have secret name. 
+ key: "" + # key: "{{- .Files.Get \"tls.key\" -}}" + +# -- Arbitrary sidecar containers list +extraContainers: {} +# - name: your_sidecar +# image: gcr.io/your_image:your_tag +# args: +# resources: +# requests:{} + +# -- Arbitrary sidecar containers list for 1.29+ kubernetes +initContainers: {} + +serviceAccount: + # -- Specifies whether a Service Account should be created, creates "sql-exporter" service account if true, unless + # overridden. Otherwise, set to `default` if false, and custom service account name is not provided. Check all the + # available parameters. + create: true + # -- References a custom Service Account if it already exists + # name: "sql-exporter-custom-sa" + # -- Annotations to add to the Service Account + annotations: {} + ## example annotations ## + # annotations: + # iam.gke.io/gcp-service-account: my-service-account@gke.url + # -- Defines if token is automatically mounted to the pod after it has been created + # automountServiceAccountToken: false +# Liveness and readiness probes for the application controller pods +livenessProbe: + initialDelaySeconds: 5 + timeoutSeconds: 30 + +readinessProbe: + initialDelaySeconds: 5 + timeoutSeconds: 30 +# -- Resource limits and requests for the application controller pods +resources: {} + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi +# -- Pod labels +podLabels: {} +# -- Pod annotations +podAnnotations: {} +# -- Pod security context +podSecurityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 +# @ignored +securityContext: {} +# Prometheus Operator values +serviceMonitor: + # -- Enable ServiceMonitor + enabled: true + # -- ServiceMonitor interval + interval: 15s + # -- ServiceMonitor path + path: /metrics + # -- ServiceMonitor scrape timeout + # scrapeTimeout: 10s + # -- ServiceMonitor metric relabelings + metricRelabelings: {} + # -- ServiceMonitor relabelings + relabelings: {} + # 
-- ServiceMonitor namespace override (default is .Release.Namespace) + namespace: ~ +# Additional env variables +# - kind should be either Secret or ConfigMap +# - name is the name of the Secret or ConfigMap that should be used +# - key is the key of the object inside of a Secret or ConfigMap +# env: +# SQLEXPORTER_TARGET_DSN: +# from: +# kind: Secret +# name: sql_exporter_secret +# key: CONNECTION_STRING +# envFrom: +# - configMapRef: +# name: env-configmap +# - secretRef: +# name: env-secrets +# extraVolumes: +# - name: configmap-mount +# volume: +# configMap: +# name: log-config +# items: +# - key: log_level +# path: log_level +# mount: +# readOnly: true +# mountPath: /etc/config +# -- Set to true to create a config as a part of the helm chart +createConfig: true +# -- Set log level (info if unset) +logLevel: debug +# -- Set log format (logfmt if unset) +logFormat: logfmt +# -- Enable reload collector data handler (endpoint /reload) +reloadEnabled: false +# -- SQL Exporter configuration, can be a dictionary, or a template yaml string. +config: + global: + # -- Scrape timeout + scrape_timeout: 10s + # -- Scrape timeout offset. Must be strictly positive. + scrape_timeout_offset: 500ms + # -- Interval between dropping scrape_errors_total metric: by default the metric is persistent. + scrape_error_drop_interval: 0s + # -- Minimum interval between collector runs. + min_interval: 0s + # -- Number of open connections. + max_connections: 3 + # -- Number of idle connections. + max_idle_connections: 3 +# Target and collectors are not set so the chart is more flexible. Please configure it yourself. 
+# target: +# data_source_name: 'sqlserver://prom_user:prom_password@dbserver1.example.com:1433' +# collectors: [active_connections] +# collectors: +# - collector_name: active_connections +# metrics: +# - metric_name: active_connections +# type: gauge +# help: 'Active connections' +# key_labels: +# - "datname" +# - "usename" +# - "state" +# values: +# - "count" +# query_ref: active_connections +# queries: +# - query_name: active_connections +# query: | +# SELECT +# datname::text, +# usename::text, +# state::text, +# COUNT(state)::float AS count +# FROM pg_stat_activity +# GROUP BY datname, usename, state; +# collector_files: +# - "*.collector.yml" +# --------------------------------------------------------------------- +# -- Collector Files +# (can be dictionaries or yaml string templates) +# --------------------------------------------------------------------- +# -- Collector files are mounted to /etc/sql_exporter/collectors dir +# --------------------------------------------------------------------- +# collectorFiles: +# pricing_data_freshness.collector.yml: +# collector_name: pricing_data_freshness +# metrics: +# - metric_name: pricing_update_time +# type: gauge +# help: 'Time when prices for a market were last updated.' +# key_labels: +# # Populated from the `market` column of each row. 
+# - Market +# static_labels: +# # Arbitrary key/value pair +# portfolio: income +# values: [LastUpdateTime] +# query: | +# SELECT Market, max(UpdateTime) AS LastUpdateTime +# FROM MarketPrices +# GROUP BY Market diff --git a/job.go b/job.go index 0d5b672e..e2963b6c 100644 --- a/job.go +++ b/job.go @@ -25,14 +25,18 @@ func NewJob(jc *config.JobConfig, gc *config.GlobalConfig) (Job, errors.WithCont j := job{ config: jc, targets: make([]Target, 0, 10), - logContext: fmt.Sprintf("job=%q", jc.Name), + logContext: fmt.Sprintf(`job=%s`, jc.Name), + } + + if jc.EnablePing == nil { + jc.EnablePing = &config.EnablePing } for _, sc := range jc.StaticConfigs { for tname, dsn := range sc.Targets { constLabels := prometheus.Labels{ - "job": jc.Name, - "instance": tname, + "job": jc.Name, + config.TargetLabel: tname, } for name, value := range sc.Labels { // Shouldn't happen as there are sanity checks in config, but check nonetheless. @@ -41,7 +45,7 @@ func NewJob(jc *config.JobConfig, gc *config.GlobalConfig) (Job, errors.WithCont } constLabels[name] = value } - t, err := NewTarget(j.logContext, tname, string(dsn), jc.Collectors(), constLabels, gc) + t, err := NewTarget(j.logContext, tname, jc.Name, string(dsn), jc.Collectors(), constLabels, gc, jc.EnablePing) if err != nil { return nil, err } diff --git a/metric.go b/metric.go index 07cf3d9f..a498003d 100644 --- a/metric.go +++ b/metric.go @@ -1,8 +1,10 @@ package sql_exporter import ( + "database/sql" "fmt" "sort" + "time" "github.com/burningalchemist/sql_exporter/config" "github.com/burningalchemist/sql_exporter/errors" @@ -35,9 +37,9 @@ type MetricFamily struct { // NewMetricFamily creates a new MetricFamily with the given metric config and const labels (e.g. job and instance). 
func NewMetricFamily(logContext string, mc *config.MetricConfig, constLabels []*dto.LabelPair) (*MetricFamily, errors.WithContext) { - logContext = fmt.Sprintf("%s, metric=%q", logContext, mc.Name) + logContext = TrimMissingCtx(fmt.Sprintf(`%s,metric=%s`, logContext, mc.Name)) - if len(mc.Values) == 0 { + if len(mc.Values) == 0 && mc.StaticValue == nil { return nil, errors.New(logContext, "no value column defined") } if len(mc.Values) > 1 && mc.ValueLabel == "" { @@ -73,16 +75,30 @@ func NewMetricFamily(logContext string, mc *config.MetricConfig, constLabels []* } // Collect is the equivalent of prometheus.Collector.Collect() but takes a Query output map to populate values from. -func (mf MetricFamily) Collect(row map[string]interface{}, ch chan<- Metric) { +func (mf MetricFamily) Collect(row map[string]any, ch chan<- Metric) { labelValues := make([]string, len(mf.labels)) for i, label := range mf.config.KeyLabels { - labelValues[i] = row[label].(string) + labelValues[i] = row[label].(sql.NullString).String } for _, v := range mf.config.Values { if mf.config.ValueLabel != "" { labelValues[len(labelValues)-1] = v } - value := row[v].(float64) + value := row[v].(sql.NullFloat64) + if value.Valid { + metric := NewMetric(&mf, value.Float64, labelValues...) + if mf.config.TimestampValue == "" { + ch <- metric + } else { + ts := row[mf.config.TimestampValue].(sql.NullTime) + if ts.Valid { + ch <- NewMetricWithTimestamp(ts.Time, metric) + } + } + } + } + if mf.config.StaticValue != nil { + value := *mf.config.StaticValue ch <- NewMetric(&mf, value, labelValues...) 
} } @@ -279,3 +295,18 @@ func NewInvalidMetric(err errors.WithContext) Metric { func (m invalidMetric) Desc() MetricDesc { return nil } func (m invalidMetric) Write(*dto.Metric) errors.WithContext { return m.err } + +type timestampedMetric struct { + Metric + t time.Time +} + +func (m timestampedMetric) Write(pb *dto.Metric) errors.WithContext { + e := m.Metric.Write(pb) + pb.TimestampMs = proto.Int64(m.t.Unix()*1000 + int64(m.t.Nanosecond()/1000000)) + return e +} + +func NewMetricWithTimestamp(t time.Time, m Metric) Metric { + return timestampedMetric{Metric: m, t: t} +} diff --git a/packaging/conf/sql_exporter.default b/packaging/conf/sql_exporter.default index 083d83c2..3ad152fd 100644 --- a/packaging/conf/sql_exporter.default +++ b/packaging/conf/sql_exporter.default @@ -1,6 +1,6 @@ CONF_FILE=/etc/sql_exporter/sql_exporter.yml LISTEN_ADDRESS=0.0.0.0:9399 -LOG_JSON=false +LOG_FORMAT=logfmt LOG_LEVEL=debug ENABLE_RELOAD=false METRICS_PATH=/metrics diff --git a/packaging/deb/postinstall b/packaging/deb/postinstall index c60768d6..2a9360bd 100644 --- a/packaging/deb/postinstall +++ b/packaging/deb/postinstall @@ -7,30 +7,31 @@ set -e IS_UPGRADE=false case "$1" in - configure) - [ -z "$SQL_EXPORTER_USER" ] && SQL_EXPORTER_USER="sql_exporter" - [ -z "$SQL_EXPORTER_GROUP" ] && SQL_EXPORTER_GROUP="sql_exporter" - if ! getent group "$SQL_EXPORTER_GROUP" > /dev/null 2>&1 ; then - addgroup --system "$SQL_EXPORTER_GROUP" --quiet - fi - if ! id $SQL_EXPORTER_USER > /dev/null 2>&1 ; then - adduser --system --home /usr/share/sql_exporter \ - --ingroup "$SQL_EXPORTER_GROUP" --disabled-password --shell /bin/false \ - "$SQL_EXPORTER_USER" - fi +configure) + [ -z "$SQL_EXPORTER_USER" ] && SQL_EXPORTER_USER="sql_exporter" + [ -z "$SQL_EXPORTER_GROUP" ] && SQL_EXPORTER_GROUP="sql_exporter" + if ! getent group "$SQL_EXPORTER_GROUP" >/dev/null 2>&1; then + addgroup --system "$SQL_EXPORTER_GROUP" --quiet + fi + if ! 
id $SQL_EXPORTER_USER >/dev/null 2>&1; then + adduser --system --home /usr/share/sql_exporter \ + --ingroup "$SQL_EXPORTER_GROUP" --disabled-password --shell /bin/false \ + "$SQL_EXPORTER_USER" + fi # copy user config files - if [ ! -f $CONF_FILE ]; then - CONF_PATH=$(dirname "$CONF_FILE") - cp /usr/share/sql_exporter/sql_exporter.yml $CONF_FILE - cp /usr/share/sql_exporter/mssql_example.collector.yml $CONF_PATH - fi - - # configuration files should not be modifiable by sql_exporter user, as this can be a security issue - chown -Rh root:$SQL_EXPORTER_GROUP /etc/sql_exporter/ - chmod 755 /etc/sql_exporter - find /etc/sql_exporter -type f -exec chmod 640 {} ';' - find /etc/sql_exporter -type d -exec chmod 755 {} ';' + if [ ! -f $CONF_FILE ]; then + CONF_PATH=$(dirname "$CONF_FILE") + mkdir -p $CONF_PATH + cp /usr/share/sql_exporter/sql_exporter.yml $CONF_FILE + cp /usr/share/sql_exporter/mssql_example.collector.yml $CONF_PATH + fi + + # configuration files should not be modifiable by sql_exporter user, as this can be a security issue + chown -Rh root:$SQL_EXPORTER_GROUP /etc/sql_exporter/ + chmod 755 /etc/sql_exporter + find /etc/sql_exporter -type f -exec chmod 640 {} ';' + find /etc/sql_exporter -type d -exec chmod 755 {} ';' # If $1=configure and $2 is set, this is an upgrade if [ "$2" != "" ]; then @@ -55,6 +56,6 @@ case "$1" in fi echo " OK" - fi - ;; + fi + ;; esac diff --git a/packaging/deb/sql_exporter.service b/packaging/deb/sql_exporter.service index cc052630..ae9e4fb0 100644 --- a/packaging/deb/sql_exporter.service +++ b/packaging/deb/sql_exporter.service @@ -13,7 +13,7 @@ Restart=on-failure WorkingDirectory=/usr/share/sql_exporter RuntimeDirectory=sql_exporter RuntimeDirectoryMode=0750 -ExecStart=/usr/bin/sql_exporter -config.file=${CONF_FILE} -web.listen-address=${LISTEN_ADDRESS} -log.json=${LOG_JSON} -log.level=${LOG_LEVEL} -web.enable-reload=${ENABLE_RELOAD} -web.metrics-path=${METRICS_PATH} -web.config.file=${WEB_CONFIG_FILE} 
+ExecStart=/usr/bin/sql_exporter -config.file=${CONF_FILE} -web.listen-address=${LISTEN_ADDRESS} -log.format=${LOG_FORMAT} -log.level=${LOG_LEVEL} -web.enable-reload=${ENABLE_RELOAD} -web.metrics-path=${METRICS_PATH} -web.config.file=${WEB_CONFIG_FILE} LimitNOFILE=10000 TimeoutStopSec=20 CapabilityBoundingSet= diff --git a/packaging/rpm/postinstall b/packaging/rpm/postinstall index 0bdfd1f4..a26d1103 100644 --- a/packaging/rpm/postinstall +++ b/packaging/rpm/postinstall @@ -7,30 +7,31 @@ set -e IS_UPGRADE=false case "$1" in - configure) - [ -z "$SQL_EXPORTER_USER" ] && SQL_EXPORTER_USER="sql_exporter" - [ -z "$SQL_EXPORTER_GROUP" ] && SQL_EXPORTER_GROUP="sql_exporter" - if ! getent group "$SQL_EXPORTER_GROUP" > /dev/null 2>&1 ; then - groupadd -r "$SQL_EXPORTER_GROUP" - fi - if ! getent passwd $SQL_EXPORTER_USER > /dev/null 2>&1 ; then - useradd -r -d /usr/share/sql_exporter \ - -g "$SQL_EXPORTER_GROUP" -s /sbin/nologin \ - "$SQL_EXPORTER_USER" - fi +configure) + [ -z "$SQL_EXPORTER_USER" ] && SQL_EXPORTER_USER="sql_exporter" + [ -z "$SQL_EXPORTER_GROUP" ] && SQL_EXPORTER_GROUP="sql_exporter" + if ! getent group "$SQL_EXPORTER_GROUP" >/dev/null 2>&1; then + groupadd -r "$SQL_EXPORTER_GROUP" + fi + if ! getent passwd $SQL_EXPORTER_USER >/dev/null 2>&1; then + useradd -r -d /usr/share/sql_exporter \ + -g "$SQL_EXPORTER_GROUP" -s /sbin/nologin \ + "$SQL_EXPORTER_USER" + fi # copy user config files - if [ ! -f $CONF_FILE ]; then - CONF_PATH=$(dirname "$CONF_FILE") - cp /usr/share/sql_exporter/sql_exporter.yml "$CONF_FILE" - cp /usr/share/sql_exporter/mssql_example.collector.yml "$CONF_PATH" - fi - - # configuration files should not be modifiable by sql_exporter user, as this can be a security issue - chown -Rh root:$SQL_EXPORTER_GROUP /etc/sql_exporter/ - chmod 755 /etc/sql_exporter - find /etc/sql_exporter -type f -exec chmod 640 {} ';' - find /etc/sql_exporter -type d -exec chmod 755 {} ';' + if [ ! 
-f $CONF_FILE ]; then + CONF_PATH=$(dirname "$CONF_FILE") + mkdir -p $CONF_PATH + cp /usr/share/sql_exporter/sql_exporter.yml "$CONF_FILE" + cp /usr/share/sql_exporter/mssql_example.collector.yml "$CONF_PATH" + fi + + # configuration files should not be modifiable by sql_exporter user, as this can be a security issue + chown -Rh root:$SQL_EXPORTER_GROUP /etc/sql_exporter/ + chmod 755 /etc/sql_exporter + find /etc/sql_exporter -type f -exec chmod 640 {} ';' + find /etc/sql_exporter -type d -exec chmod 755 {} ';' # If $1=configure and $2 is set, this is an upgrade if [ "$2" != "" ]; then @@ -55,6 +56,6 @@ case "$1" in fi echo " OK" - fi - ;; + fi + ;; esac diff --git a/packaging/rpm/sql_exporter.service b/packaging/rpm/sql_exporter.service index ce6280f4..a92ca58a 100644 --- a/packaging/rpm/sql_exporter.service +++ b/packaging/rpm/sql_exporter.service @@ -5,7 +5,7 @@ Wants=network-online.target After=network-online.target [Service] -EnvironmentFile=/etc/default/prometheus-sql-exporter +EnvironmentFile=/etc/sysconfig/sql_exporter User=sql_exporter Group=sql_exporter Type=simple @@ -13,7 +13,7 @@ Restart=on-failure WorkingDirectory=/usr/share/sql_exporter RuntimeDirectory=sql_exporter RuntimeDirectoryMode=0750 -ExecStart=/usr/bin/sql_exporter -config.file=${CONF_FILE} -web.listen-address=${LISTEN_ADDRESS} -log.json=${LOG_JSON} -log-level=${LOG_LEVEL} -web.enable-reload=${ENABLE_RELOAD} -web.metrics-path=${METRICS_PATH} -web.config.file=${WEB_CONFIG_FILE} +ExecStart=/usr/bin/sql_exporter -config.file=${CONF_FILE} -web.listen-address=${LISTEN_ADDRESS} -log.format=${LOG_FORMAT} -log.level=${LOG_LEVEL} -web.enable-reload=${ENABLE_RELOAD} -web.metrics-path=${METRICS_PATH} -web.config.file=${WEB_CONFIG_FILE} LimitNOFILE=10000 TimeoutStopSec=20 CapabilityBoundingSet= diff --git a/query.go b/query.go index 64dde889..fed479ed 100644 --- a/query.go +++ b/query.go @@ -4,10 +4,11 @@ import ( "context" "database/sql" "fmt" + "log/slog" + "time" 
"github.com/burningalchemist/sql_exporter/config" "github.com/burningalchemist/sql_exporter/errors" - "k8s.io/klog/v2" ) // Query wraps a sql.Stmt and all the metrics populated from it. It helps extract keys and values from result rows. @@ -28,13 +29,14 @@ type ( ) const ( - columnTypeKey = 1 - columnTypeValue = 2 + columnTypeKey columnType = 1 + columnTypeValue columnType = 2 + columnTypeTime columnType = 3 ) // NewQuery returns a new Query that will populate the given metric families. func NewQuery(logContext string, qc *config.QueryConfig, metricFamilies ...*MetricFamily) (*Query, errors.WithContext) { - logContext = fmt.Sprintf("%s, query=%q", logContext, qc.Name) + logContext = TrimMissingCtx(fmt.Sprintf(`%s,query=%s`, logContext, qc.Name)) columnTypes := make(columnTypeMap) @@ -49,6 +51,11 @@ func NewQuery(logContext string, qc *config.QueryConfig, metricFamilies ...*Metr return nil, err } } + if mf.config.TimestampValue != "" { + if err := setColumnType(logContext, mf.config.TimestampValue, columnTypeTime, columnTypes); err != nil { + return nil, err + } + } } q := Query{ @@ -77,6 +84,7 @@ func setColumnType(logContext, columnName string, ctype columnType, columnTypes func (q *Query) Collect(ctx context.Context, conn *sql.DB, ch chan<- Metric) { if ctx.Err() != nil { ch <- NewInvalidMetric(errors.Wrap(q.logContext, ctx.Err())) + return } rows, err := q.run(ctx, conn) @@ -88,6 +96,10 @@ func (q *Query) Collect(ctx context.Context, conn *sql.DB, ch chan<- Metric) { dest, err := q.scanDest(rows) if err != nil { + if config.IgnoreMissingVals { + slog.Warn("Ignoring missing values", "logContext", q.logContext) + return + } ch <- NewInvalidMetric(err) return } @@ -108,10 +120,22 @@ func (q *Query) Collect(ctx context.Context, conn *sql.DB, ch chan<- Metric) { // run executes the query on the provided database, in the provided context. 
func (q *Query) run(ctx context.Context, conn *sql.DB) (*sql.Rows, errors.WithContext) { + if slog.Default().Enabled(ctx, slog.LevelDebug) { + start := time.Now() + defer func() { + slog.Debug("Query execution time", "logContext", q.logContext, "duration", time.Since(start)) + }() + } + if q.conn != nil && q.conn != conn { panic(fmt.Sprintf("[%s] Expecting to always run on the same database handle", q.logContext)) } + if q.config.NoPreparedStatement { + rows, err := conn.QueryContext(ctx, q.config.Query) + return rows, errors.Wrap(q.logContext, err) + } + if q.stmt == nil { stmt, err := conn.PrepareContext(ctx, q.config.Query) if err != nil { @@ -126,30 +150,33 @@ func (q *Query) run(ctx context.Context, conn *sql.DB) (*sql.Rows, errors.WithCo // scanDest creates a slice to scan the provided rows into, with strings for keys, float64s for values and interface{} // for any extra columns. -func (q *Query) scanDest(rows *sql.Rows) ([]interface{}, errors.WithContext) { +func (q *Query) scanDest(rows *sql.Rows) ([]any, errors.WithContext) { columns, err := rows.Columns() if err != nil { return nil, errors.Wrap(q.logContext, err) } - klog.V(3).Infof(`returned_columns="%v"%v`, columns, q.logContext) + slog.Debug("Returned columns", "logContext", q.logContext, "columns", columns) // Create the slice to scan the row into, with strings for keys and float64s for values. 
- dest := make([]interface{}, 0, len(columns)) + dest := make([]any, 0, len(columns)) have := make(map[string]bool, len(q.columnTypes)) for i, column := range columns { switch q.columnTypes[column] { case columnTypeKey: - dest = append(dest, new(string)) + dest = append(dest, new(sql.NullString)) have[column] = true case columnTypeValue: - dest = append(dest, new(float64)) + dest = append(dest, new(sql.NullFloat64)) + have[column] = true + case columnTypeTime: + dest = append(dest, new(sql.NullTime)) have[column] = true default: if column == "" { - klog.Warningf("[%s] Unnamed column %d returned by query", q.logContext, i) + slog.Debug("Unnamed column", "logContext", q.logContext, "column", i) } else { - klog.Warningf("[%s] Extra column %q returned by query", q.logContext, column) + slog.Debug("Extra column returned by query", "logContext", q.logContext, "column", column) } - dest = append(dest, new(interface{})) + dest = append(dest, new(any)) } } @@ -169,7 +196,7 @@ func (q *Query) scanDest(rows *sql.Rows) ([]interface{}, errors.WithContext) { // scanRow scans the current row into a map of column name to value, with string values for key columns and float64 // values for value columns, using dest as a buffer. -func (q *Query) scanRow(rows *sql.Rows, dest []interface{}) (map[string]interface{}, errors.WithContext) { +func (q *Query) scanRow(rows *sql.Rows, dest []any) (map[string]any, errors.WithContext) { columns, err := rows.Columns() if err != nil { return nil, errors.Wrap(q.logContext, err) @@ -181,13 +208,24 @@ func (q *Query) scanRow(rows *sql.Rows, dest []interface{}) (map[string]interfac } // Pick all values we're interested in into a map. 
- result := make(map[string]interface{}, len(q.columnTypes)) + result := make(map[string]any, len(q.columnTypes)) for i, column := range columns { switch q.columnTypes[column] { case columnTypeKey: - result[column] = *dest[i].(*string) + if !dest[i].(*sql.NullString).Valid { + slog.Debug("Key column is NULL", "logContext", q.logContext, "column", column) + } + result[column] = *dest[i].(*sql.NullString) + case columnTypeTime: + if !dest[i].(*sql.NullTime).Valid { + slog.Debug("Time column is NULL", "logContext", q.logContext, "column", column) + } + result[column] = *dest[i].(*sql.NullTime) case columnTypeValue: - result[column] = *dest[i].(*float64) + if !dest[i].(*sql.NullFloat64).Valid { + slog.Debug("Value column is NULL", "logContext", q.logContext, "column", column) + } + result[column] = *dest[i].(*sql.NullFloat64) } } return result, nil diff --git a/reload.go b/reload.go new file mode 100644 index 00000000..837f24bc --- /dev/null +++ b/reload.go @@ -0,0 +1,101 @@ +package sql_exporter + +import ( + "errors" + "log/slog" + + cfg "github.com/burningalchemist/sql_exporter/config" +) + +// Reload function is used to reload the exporter configuration without restarting the exporter +func Reload(e Exporter, configFile *string) error { + slog.Warn("Reloading collectors has started...") + slog.Warn("Connections will not be changed upon the restart of the exporter") + configNext, err := cfg.Load(*configFile) + if err != nil { + slog.Error("Error reading config file", "error", err) + return err + } + + configCurrent := e.Config() + + // Clear current collectors and replace with new ones + if len(configCurrent.Collectors) > 0 { + configCurrent.Collectors = configCurrent.Collectors[:0] + } + configCurrent.Collectors = configNext.Collectors + slog.Debug("Total collector size change", "from", len(configCurrent.Collectors), "to", len(configNext.Collectors)) + + // Reload targets + switch { + case configCurrent.Target != nil && configNext.Target != nil: + if err = 
reloadTarget(e, configNext, configCurrent); err != nil {
+			return err
+		}
+	case len(configCurrent.Jobs) > 0 && len(configNext.Jobs) > 0:
+		if err = reloadJobs(e, configNext, configCurrent); err != nil {
+			return err
+		}
+	case configCurrent.Target != nil && len(configNext.Jobs) > 0:
+	case len(configCurrent.Jobs) > 0 && configNext.Target != nil:
+		return errors.New("changing scrape mode is not allowed. Please restart the exporter")
+	default:
+		slog.Warn("No target or jobs have been found - nothing to reload")
+	}
+	return nil
+}
+
+func reloadTarget(e Exporter, nc, cc *cfg.Config) error {
+	slog.Warn("Recreating target...")
+
+	// We want to preserve DSN from the previous config revision to avoid any connection changes
+	nc.Target.DSN = cc.Target.DSN
+	// Apply the new target configuration
+	cc.Target = nc.Target
+	// Recreate the target object
+	target, err := NewTarget("", cc.Target.Name, "", string(cc.Target.DSN),
+		cc.Target.Collectors(), nil, cc.Globals, cc.Target.EnablePing)
+	if err != nil {
+		slog.Error("Error recreating a target", "error", err)
+		return err
+	}
+
+	// Populate the target list
+	e.UpdateTarget([]Target{target})
+	slog.Warn("Collectors have been successfully updated for the target")
+	return nil
+}
+
+func reloadJobs(e Exporter, nc, cc *cfg.Config) error {
+	slog.Warn("Recreating jobs...")
+	// We want to preserve `static_configs` from the previous config revision to avoid any connection changes
+	for _, currentJob := range cc.Jobs {
+		for _, newJob := range nc.Jobs {
+			if newJob.Name == currentJob.Name {
+				newJob.StaticConfigs = currentJob.StaticConfigs
+			}
+		}
+	}
+	cc.Jobs = nc.Jobs
+	var updateErr error
+	targets := make([]Target, 0, len(cc.Jobs))
+
+	for _, jobConfigItem := range cc.Jobs {
+		job, err := NewJob(jobConfigItem, cc.Globals)
+		if err != nil {
+			updateErr = err
+			break
+		}
+		targets = append(targets, job.Targets()...)
+ slog.Debug("Recreated Job", "name", jobConfigItem.Name) + } + + if updateErr != nil { + slog.Error("Error recreating jobs", "error", updateErr) + return updateErr + } + + e.UpdateTarget(targets) + slog.Warn("Collectors have been successfully updated for the jobs") + return nil +} diff --git a/sql.go b/sql.go index 4a89448d..431d7d92 100644 --- a/sql.go +++ b/sql.go @@ -5,11 +5,12 @@ import ( "database/sql" "errors" "fmt" + "log/slog" "net/url" + "os" "time" "github.com/xo/dburl" - "k8s.io/klog/v2" ) // OpenConnection parses a provided DSN, and opens a DB handle ensuring early termination if the context is closed @@ -27,9 +28,14 @@ func OpenConnection(ctx context.Context, logContext, dsn string, maxConns, maxId return nil, err } + driver := url.Driver + if url.GoDriver != "" { + driver = url.GoDriver + } + // Open the DB handle in a separate goroutine so we can terminate early if the context closes. go func() { - conn, err = sql.Open(url.Driver, url.DSN) + conn, err = sql.Open(driver, url.DSN) close(ch) }() @@ -46,12 +52,7 @@ func OpenConnection(ctx context.Context, logContext, dsn string, maxConns, maxId conn.SetMaxOpenConns(maxConns) conn.SetConnMaxLifetime(maxConnLifetime) - if klog.V(1).Enabled() { - if len(logContext) > 0 { - logContext = fmt.Sprintf("[%s] ", logContext) - } - klog.Infof("%sDatabase handle successfully opened with '%s' driver", logContext, url.Driver) - } + slog.Debug("Database handle successfully opened", "logContext", logContext, "driver", driver) return conn, nil } @@ -80,8 +81,8 @@ func PingDB(ctx context.Context, conn *sql.DB) error { // if underlying url parse failed. By default it returns a raw url string in error message, // which most likely contains a password. It's undesired here. 
func safeParse(rawURL string) (*dburl.URL, error) {
-	parsed, err := dburl.Parse(rawURL)
-	//klog.Infof("parsed url: %v", parsed)
+
+	parsed, err := dburl.Parse(expandEnv(rawURL))
 	if err != nil {
 		if uerr := new(url.Error); errors.As(err, &uerr) {
 			return nil, uerr.Err
@@ -91,3 +92,15 @@ func safeParse(rawURL string) (*dburl.URL, error) {
 	}
 	return parsed, nil
 }
+
+// expandEnv falls back to the original env variable if not found for better readability
+func expandEnv(env string) string {
+	lookupFunc := func(env string) string {
+		if value, ok := os.LookupEnv(env); ok {
+			return value
+		}
+		slog.Error("Environment variable is not found, cannot expand", "env", env)
+		return fmt.Sprintf("$%s", env)
+	}
+	return os.Expand(env, lookupFunc)
+}
diff --git a/target.go b/target.go
index 69631e31..37c2eaab 100644
--- a/target.go
+++ b/target.go
@@ -4,8 +4,8 @@ import (
 	"context"
 	"database/sql"
 	"database/sql/driver"
-	"flag"
 	"fmt"
+	"log/slog"
 	"sort"
 	"sync"
 	"time"
@@ -17,8 +17,6 @@ import (
 	"google.golang.org/protobuf/proto"
 )
 
-var enablePing = flag.Bool("config.enable-ping", true, "Enable ping for targets")
-
 const (
 	// Capacity for the channel to collect metrics.
 	capMetricChan = 1000
@@ -34,11 +32,13 @@ const (
 type Target interface {
 	// Collect is the equivalent of prometheus.Collector.Collect(), but takes a context to run in.
 	Collect(ctx context.Context, ch chan<- Metric)
+	JobGroup() string
 }
 
 // target implements Target. It wraps a sql.DB, which is initially nil but never changes once instantiated.
 type target struct {
 	name               string
+	jobGroup           string
 	dsn                string
 	collectors         []Collector
 	constLabels        prometheus.Labels
@@ -46,20 +46,30 @@ type target struct {
 	upDesc             MetricDesc
 	scrapeDurationDesc MetricDesc
 	logContext         string
+	enablePing         *bool
 	conn               *sql.DB
 }
 
-// NewTarget returns a new Target with the given instance name, data source name, collectors and constant labels.
+// NewTarget returns a new Target with the given target name, data source name, collectors and constant labels. // An empty target name means the exporter is running in single target mode: no synthetic metrics will be exported. func NewTarget( - logContext, name, dsn string, ccs []*config.CollectorConfig, constLabels prometheus.Labels, gc *config.GlobalConfig) ( + logContext, tname, jg, dsn string, ccs []*config.CollectorConfig, constLabels prometheus.Labels, gc *config.GlobalConfig, ep *bool) ( Target, errors.WithContext, ) { - if name != "" { - logContext = fmt.Sprintf("%s, target=%q", logContext, name) + if tname != "" { + logContext = TrimMissingCtx(fmt.Sprintf(`%s,target=%s`, logContext, tname)) + if constLabels == nil { + constLabels = prometheus.Labels{config.TargetLabel: tname} + } + } + + if ep == nil { + ep = &config.EnablePing } + slog.Debug("target ping enabled", "logContext", logContext, "enabled", *ep) + // Sort const labels by name to ensure consistent ordering. constLabelPairs := make([]*dto.LabelPair, 0, len(constLabels)) for n, v := range constLabels { constLabelPairs = append(constLabelPairs, &dto.LabelPair{ @@ -81,7 +91,8 @@ func NewTarget( upDesc := NewAutomaticMetricDesc(logContext, upMetricName, upMetricHelp, prometheus.GaugeValue, constLabelPairs) scrapeDurationDesc := NewAutomaticMetricDesc(logContext, scrapeDurationName, scrapeDurationHelp, prometheus.GaugeValue, constLabelPairs) t := target{ - name: name, + name: tname, + jobGroup: jg, dsn: dsn, collectors: collectors, constLabels: constLabels, @@ -89,6 +100,7 @@ func NewTarget( upDesc: upDesc, scrapeDurationDesc: scrapeDurationDesc, logContext: logContext, + enablePing: ep, } return &t, nil } @@ -150,7 +162,7 @@ func (t *target) ping(ctx context.Context) errors.WithContext { // If we have a handle and the context is not closed, test whether the database is up. // FIXME: we ping the database during each request even with cacheCollector. 
It leads // to additional charges for paid database services. - if t.conn != nil && ctx.Err() == nil && *enablePing { + if t.conn != nil && ctx.Err() == nil && *t.enablePing { var err error // Ping up to max_connections + 1 times as long as the returned error is driver.ErrBadConn, to purge the connection // pool of bad connections. This might happen if the previous scrape timed out and in-flight queries got canceled. @@ -177,3 +189,12 @@ func boolToFloat64(value bool) float64 { } return 0.0 } + +// OfBool returns bool address. +func OfBool(i bool) *bool { + return &i +} + +func (t *target) JobGroup() string { + return t.jobGroup +}